-rw-r--r--.get_maintainer.ignore1
-rw-r--r--.mailmap6
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio6
-rw-r--r--Documentation/DocBook/alsa-driver-api.tmpl2
-rw-r--r--Documentation/DocBook/drm.tmpl2
-rw-r--r--Documentation/arm/SPEAr/overview.txt2
-rw-r--r--Documentation/arm/sunxi/README18
-rw-r--r--Documentation/device-mapper/cache.txt6
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/apm-xgene-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt26
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt3
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ti/emif.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/ti-phy.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/cs4349.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/ics43432.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/max98357a.txt6
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-max98090.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-max98090.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-rt5645.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt155
-rw-r--r--Documentation/devicetree/bindings/spi/spi-ath79.txt6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/hwmon/nct7904 44
-rw-r--r--Documentation/input/alps.txt6
-rw-r--r--Documentation/kbuild/makefiles.txt8
-rw-r--r--Documentation/power/swsusp.txt13
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py21
-rw-r--r--MAINTAINERS167
-rw-r--r--Makefile20
-rw-r--r--arch/Kconfig4
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arc/Kconfig16
-rw-r--r--arch/arc/Makefile13
-rw-r--r--arch/arc/boot/dts/axc003.dtsi2
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi2
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/arcregs.h7
-rw-r--r--arch/arc/include/asm/atomic.h78
-rw-r--r--arch/arc/include/asm/bitops.h35
-rw-r--r--arch/arc/include/asm/futex.h48
-rw-r--r--arch/arc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arc/include/asm/ptrace.h52
-rw-r--r--arch/arc/include/asm/spinlock.h538
-rw-r--r--arch/arc/include/asm/spinlock_types.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h20
-rw-r--r--arch/arc/kernel/intc-arcv2.c1
-rw-r--r--arch/arc/kernel/intc-compact.c1
-rw-r--r--arch/arc/kernel/mcip.c23
-rw-r--r--arch/arc/kernel/setup.c27
-rw-r--r--arch/arc/kernel/time.c40
-rw-r--r--arch/arc/kernel/troubleshoot.c1
-rw-r--r--arch/arc/lib/memcpy-archs.S2
-rw-r--r--arch/arc/lib/memset-archs.S43
-rw-r--r--arch/arc/mm/cache.c12
-rw-r--r--arch/arc/mm/dma.c4
-rw-r--r--arch/arc/plat-axs10x/axs10x.c15
-rw-r--r--arch/arm/Kconfig6
-rw-r--r--arch/arm/Kconfig.debug2
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/dts/am335x-boneblack.dts4
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts16
-rw-r--r--arch/arm/boot/dts/am4372.dtsi7
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts4
-rw-r--r--arch/arm/boot/dts/atlas7.dtsi1042
-rw-r--r--arch/arm/boot/dts/cros-ec-keyboard.dtsi4
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts5
-rw-r--r--arch/arm/boot/dts/dra7.dtsi3
-rw-r--r--arch/arm/boot/dts/dra72-evm.dts5
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi12
-rw-r--r--arch/arm/boot/dts/imx23.dtsi1
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts5
-rw-r--r--arch/arm/boot/dts/imx27.dtsi12
-rw-r--r--arch/arm/boot/dts/imx35.dtsi8
-rw-r--r--arch/arm/boot/dts/imx51-apf51dev.dts2
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts4
-rw-r--r--arch/arm/boot/dts/imx53-qsb-common.dtsi9
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts4
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-tx53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-voipac-bsb.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts8
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts5
-rw-r--r--arch/arm/boot/dts/imx6q-gk802.dts3
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-hummingboard.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabrelite.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-tx6.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6sl-evk.dts10
-rw-r--r--arch/arm/boot/dts/imx6sx-sabreauto.dts4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts4
-rw-r--r--arch/arm/boot/dts/k2e-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2e.dtsi18
-rw-r--r--arch/arm/boot/dts/k2hk-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2hk.dtsi11
-rw-r--r--arch/arm/boot/dts/k2l-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2l.dtsi16
-rw-r--r--arch/arm/boot/dts/keystone.dtsi14
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4.dtsi5
-rw-r--r--arch/arm/boot/dts/omap5.dtsi5
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts26
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi2
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi2
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear300.dtsi2
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear320.dtsi2
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-ccu8540.dts7
-rw-r--r--arch/arm/boot/dts/ste-ccu9540.dts7
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi59
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-stuib.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60-tvk.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefprev60.dtsi5
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-stuib.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus-tvk.dts7
-rw-r--r--arch/arm/boot/dts/ste-hrefv60plus.dtsi25
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts1
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi1
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts25
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts25
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/sunxi_defconfig6
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/io.h75
-rw-r--r--arch/arm/include/asm/memory.h4
-rw-r--r--arch/arm/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arm/include/asm/pgtable-2level.h31
-rw-r--r--arch/arm/kernel/armksyms.c6
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/entry-common.S1
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/perf_event.c3
-rw-r--r--arch/arm/kernel/reboot.c2
-rw-r--r--arch/arm/kernel/smp.c4
-rw-r--r--arch/arm/kernel/vdso.c7
-rw-r--r--arch/arm/lib/memcpy.S2
-rw-r--r--arch/arm/lib/memset.S2
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c2
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-imx/gpc.c27
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/dma.c1
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c5
-rw-r--r--arch/arm/mach-prima2/Kconfig1
-rw-r--r--arch/arm/mach-prima2/rtciobrg.c48
-rw-r--r--arch/arm/mach-pxa/capc7117.c3
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c3
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c3
-rw-r--r--arch/arm/mach-pxa/em-x270.c2
-rw-r--r--arch/arm/mach-pxa/icontrol.c3
-rw-r--r--arch/arm/mach-pxa/trizeps4.c3
-rw-r--r--arch/arm/mach-pxa/vpac270.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-spear/generic.h2
-rw-r--r--arch/arm/mach-spear/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-spear/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear/include/mach/spear.h2
-rw-r--r--arch/arm/mach-spear/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-spear/pl080.c2
-rw-r--r--arch/arm/mach-spear/pl080.h2
-rw-r--r--arch/arm/mach-spear/restart.c2
-rw-r--r--arch/arm/mach-spear/spear1310.c2
-rw-r--r--arch/arm/mach-spear/spear1340.c2
-rw-r--r--arch/arm/mach-spear/spear13xx.c2
-rw-r--r--arch/arm/mach-spear/spear300.c2
-rw-r--r--arch/arm/mach-spear/spear310.c2
-rw-r--r--arch/arm/mach-spear/spear320.c2
-rw-r--r--arch/arm/mach-spear/spear3xx.c2
-rw-r--r--arch/arm/mach-sunxi/Kconfig2
-rw-r--r--arch/arm/mach-sunxi/sunxi.c5
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/ioremap.c33
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/nommu.c39
-rw-r--r--arch/arm/mm/proc-v7.S14
-rw-r--r--arch/arm/net/bpf_jit_32.c57
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm/vdso/vdsomunge.c56
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/boot/dts/apm/apm-mustang.dts10
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi2
-rw-r--r--arch/arm64/boot/dts/arm/Makefile1
-rw-r--r--arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts191
-rw-r--r--arch/arm64/boot/dts/cavium/thunder-88xx.dtsi9
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/acpi.h8
-rw-r--r--arch/arm64/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/arm64/kernel/efi.c4
-rw-r--r--arch/arm64/kernel/entry.S9
-rw-r--r--arch/arm64/kernel/entry32.S2
-rw-r--r--arch/arm64/kernel/irq.c4
-rw-r--r--arch/arm64/kernel/signal32.c5
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kernel/vdso.c7
-rw-r--r--arch/arm64/mm/Makefile2
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/avr32/kernel/time.c65
-rw-r--r--arch/avr32/mach-at32ap/clock.c20
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c2
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/frv/include/asm/Kbuild1
-rw-r--r--arch/frv/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/m32r/include/asm/Kbuild1
-rw-r--r--arch/m32r/include/asm/io.h5
-rw-r--r--arch/m32r/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/m68k/Kconfig.cpu49
-rw-r--r--arch/m68k/configs/m5208evb_defconfig22
-rw-r--r--arch/m68k/configs/m5249evb_defconfig17
-rw-r--r--arch/m68k/configs/m5272c3_defconfig14
-rw-r--r--arch/m68k/configs/m5275evb_defconfig19
-rw-r--r--arch/m68k/configs/m5307c3_defconfig21
-rw-r--r--arch/m68k/configs/m5407c3_defconfig17
-rw-r--r--arch/m68k/configs/m5475evb_defconfig9
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/coldfire.h2
-rw-r--r--arch/m68k/include/asm/io_mm.h3
-rw-r--r--arch/m68k/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/mips/Kconfig9
-rw-r--r--arch/mips/Makefile7
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h2
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h3
-rw-r--r--arch/mips/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/mips/include/asm/pgtable.h31
-rw-r--r--arch/mips/include/asm/smp.h3
-rw-r--r--arch/mips/include/asm/stackframe.h25
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h4
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/mips/kernel/branch.c4
-rw-r--r--arch/mips/kernel/cps-vec.S96
-rw-r--r--arch/mips/kernel/genex.S2
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c5
-rw-r--r--arch/mips/kernel/prom.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S8
-rw-r--r--arch/mips/kernel/scall32-o32.S37
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S35
-rw-r--r--arch/mips/kernel/setup.c13
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c4
-rw-r--r--arch/mips/kernel/smp-cps.c6
-rw-r--r--arch/mips/kernel/smp.c54
-rw-r--r--arch/mips/kernel/traps.c21
-rw-r--r--arch/mips/kernel/unaligned.c2
-rw-r--r--arch/mips/lantiq/irq.c3
-rw-r--r--arch/mips/loongson64/common/bonito-irq.c2
-rw-r--r--arch/mips/loongson64/common/cmdline.c2
-rw-r--r--arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c2
-rw-r--r--arch/mips/loongson64/common/env.c2
-rw-r--r--arch/mips/loongson64/common/irq.c2
-rw-r--r--arch/mips/loongson64/common/setup.c2
-rw-r--r--arch/mips/loongson64/fuloong-2e/irq.c2
-rw-r--r--arch/mips/loongson64/lemote-2f/clock.c4
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c2
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c7
-rw-r--r--arch/mips/math-emu/cp1emu.c6
-rw-r--r--arch/mips/mm/c-r4k.c18
-rw-r--r--arch/mips/mm/cache.c8
-rw-r--r--arch/mips/mm/fault.c3
-rw-r--r--arch/mips/mti-malta/malta-int.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c36
-rw-r--r--arch/mips/mti-sead3/sead3-time.c1
-rw-r--r--arch/mips/netlogic/common/smp.c2
-rw-r--r--arch/mips/paravirt/paravirt-smp.c2
-rw-r--r--arch/mips/pistachio/init.c8
-rw-r--r--arch/mips/pistachio/time.c6
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smp.c2
-rw-r--r--arch/mips/ralink/irq.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c8
-rw-r--r--arch/mips/sibyte/Kconfig5
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c9
-rw-r--r--arch/mips/sibyte/common/bus_watcher.c5
-rw-r--r--arch/mips/sibyte/sb1250/setup.c2
-rw-r--r--arch/mips/sibyte/sb1250/smp.c7
-rw-r--r--arch/mn10300/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/openrisc/Kconfig4
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/parisc/include/asm/pgalloc.h3
-rw-r--r--arch/parisc/include/asm/pgtable.h55
-rw-r--r--arch/parisc/include/asm/tlbflush.h53
-rw-r--r--arch/parisc/kernel/cache.c105
-rw-r--r--arch/parisc/kernel/entry.S163
-rw-r--r--arch/parisc/kernel/traps.c4
-rw-r--r--arch/powerpc/kernel/idle_power7.S31
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/mm/fault.c4
-rw-r--r--arch/powerpc/perf/hv-24x7.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c16
-rw-r--r--arch/powerpc/platforms/powernv/opal-prd.c9
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c11
-rw-r--r--arch/powerpc/sysdev/ppc4xx_hsta_msi.c1
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/ctl_reg.h5
-rw-r--r--arch/s390/include/asm/hugetlb.h1
-rw-r--r--arch/s390/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/s390/include/asm/page.h8
-rw-r--r--arch/s390/include/asm/perf_event.h8
-rw-r--r--arch/s390/kernel/asm-offsets.c15
-rw-r--r--arch/s390/kernel/cache.c2
-rw-r--r--arch/s390/kernel/entry.S13
-rw-r--r--arch/s390/kernel/nmi.c51
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/sclp.S4
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c14
-rw-r--r--arch/s390/oprofile/init.c1
-rw-r--r--arch/score/include/asm/Kbuild1
-rw-r--r--arch/score/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/sparc/include/asm/visasm.h16
-rw-r--r--arch/sparc/lib/NG4memcpy.S5
-rw-r--r--arch/sparc/lib/VISsave.S67
-rw-r--r--arch/sparc/lib/ksyms.c4
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/tile/kernel/compat_signal.c2
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/tile/lib/memcpy_user_64.c4
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/Kconfig.debug12
-rw-r--r--arch/x86/boot/compressed/eboot.c4
-rw-r--r--arch/x86/entry/entry_64.S299
-rw-r--r--arch/x86/entry/entry_64_compat.S17
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/desc.h15
-rw-r--r--arch/x86/include/asm/espfix.h2
-rw-r--r--arch/x86/include/asm/fpu/types.h72
-rw-r--r--arch/x86/include/asm/intel_pmc_ipc.h27
-rw-r--r--arch/x86/include/asm/kasan.h8
-rw-r--r--arch/x86/include/asm/kvm_host.h2
-rw-r--r--arch/x86/include/asm/mm-arch-hooks.h15
-rw-r--r--arch/x86/include/asm/mmu.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h56
-rw-r--r--arch/x86/include/asm/processor.h10
-rw-r--r--arch/x86/include/asm/sigcontext.h6
-rw-r--r--arch/x86/include/asm/switch_to.h12
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h2
-rw-r--r--arch/x86/include/uapi/asm/kvm.h4
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h21
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c12
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c23
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c16
-rw-r--r--arch/x86/kernel/early_printk.c4
-rw-r--r--arch/x86/kernel/espfix_64.c28
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/fpu/init.c53
-rw-r--r--arch/x86/kernel/head64.c10
-rw-r--r--arch/x86/kernel/head_64.S29
-rw-r--r--arch/x86/kernel/irq.c20
-rw-r--r--arch/x86/kernel/ldt.c262
-rw-r--r--arch/x86/kernel/nmi.c123
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/kernel/signal.c26
-rw-r--r--arch/x86/kernel/smpboot.c38
-rw-r--r--arch/x86/kernel/step.c8
-rw-r--r--arch/x86/kernel/tsc.c11
-rw-r--r--arch/x86/kvm/cpuid.c2
-rw-r--r--arch/x86/kvm/iommu.c2
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mmu.c10
-rw-r--r--arch/x86/kvm/mtrr.c40
-rw-r--r--arch/x86/kvm/svm.c110
-rw-r--r--arch/x86/kvm/vmx.c16
-rw-r--r--arch/x86/kvm/x86.c33
-rw-r--r--arch/x86/kvm/x86.h5
-rw-r--r--arch/x86/lib/usercopy.c2
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/math-emu/fpu_system.h21
-rw-r--r--arch/x86/math-emu/get_address.c3
-rw-r--r--arch/x86/mm/ioremap.c23
-rw-r--r--arch/x86/mm/kasan_init_64.c47
-rw-r--r--arch/x86/mm/mmap.c7
-rw-r--r--arch/x86/mm/mpx.c24
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c8
-rw-r--r--arch/x86/platform/efi/efi.c5
-rw-r--r--arch/x86/power/cpu.c3
-rw-r--r--arch/x86/xen/Kconfig4
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/enlighten.c40
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/asm/mm-arch-hooks.h15
-rw-r--r--block/bio-integrity.c4
-rw-r--r--block/bio.c17
-rw-r--r--block/blk-cgroup.c146
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/blk-settings.c4
-rw-r--r--crypto/authencesn.c44
-rw-r--r--drivers/acpi/acpi_lpss.c7
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/nfit.c134
-rw-r--r--drivers/acpi/nfit.h20
-rw-r--r--drivers/acpi/osl.c12
-rw-r--r--drivers/acpi/resource.c186
-rw-r--r--drivers/acpi/scan.c32
-rw-r--r--drivers/acpi/video_detect.c16
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci_brcmstb.c6
-rw-r--r--drivers/ata/ahci_platform.c9
-rw-r--r--drivers/ata/libata-core.c45
-rw-r--r--drivers/ata/libata-eh.c105
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c24
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/ata/libata.h6
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/ata/sata_sx4.c16
-rw-r--r--drivers/base/firmware_class.c16
-rw-r--r--drivers/base/power/domain.c13
-rw-r--r--drivers/base/power/wakeirq.c12
-rw-r--r--drivers/base/power/wakeup.c31
-rw-r--r--drivers/base/regmap/internal.h2
-rw-r--r--drivers/base/regmap/regcache-rbtree.c19
-rw-r--r--drivers/base/regmap/regmap.c73
-rw-r--r--drivers/block/null_blk.c18
-rw-r--r--drivers/block/nvme-core.c13
-rw-r--r--drivers/block/rbd.c22
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c128
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/btbcm.c11
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c8
-rw-r--r--drivers/clk/at91/clk-h32mx.c4
-rw-r--r--drivers/clk/at91/clk-main.c4
-rw-r--r--drivers/clk/at91/clk-master.c8
-rw-r--r--drivers/clk/at91/clk-pll.c8
-rw-r--r--drivers/clk/at91/clk-system.c8
-rw-r--r--drivers/clk/at91/clk-utmi.c8
-rw-r--r--drivers/clk/bcm/clk-iproc-asiu.c6
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c13
-rw-r--r--drivers/clk/clk-stm32f4.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c26
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c2
-rw-r--r--drivers/clk/qcom/clk-rcg2.c9
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/spear/spear6xx_clock.c2
-rw-r--r--drivers/clk/st/clk-flexgen.c4
-rw-r--r--drivers/clk/st/clkgen-fsyn.c12
-rw-r--r--drivers/clk/st/clkgen-mux.c10
-rw-r--r--drivers/clk/st/clkgen-pll.c2
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c1
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/clocksource/timer-imx-gpt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c118
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c6
-rw-r--r--drivers/cpufreq/freq_table.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c4
-rw-r--r--drivers/cpuidle/cpuidle.c9
-rw-r--r--drivers/crypto/caam/caamhash.c7
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/nx/nx-aes-ccm.c6
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c7
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c17
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c70
-rw-r--r--drivers/crypto/nx/nx-sha256.c70
-rw-r--r--drivers/crypto/nx/nx-sha512.c72
-rw-r--r--drivers/crypto/nx/nx.c3
-rw-r--r--drivers/crypto/nx/nx.h14
-rw-r--r--drivers/crypto/omap-des.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/dma/at_hdmac.c132
-rw-r--r--drivers/dma/at_hdmac_regs.h3
-rw-r--r--drivers/dma/at_xdmac.c26
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/dma/virt-dma.c19
-rw-r--r--drivers/dma/virt-dma.h13
-rw-r--r--drivers/dma/xgene-dma.c3
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/extcon/extcon-palmas.c13
-rw-r--r--drivers/extcon/extcon.c61
-rw-r--r--drivers/firmware/broadcom/bcm47xx_nvram.c2
-rw-r--r--drivers/firmware/efi/cper.c15
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/gpio/gpio-brcmstb.c14
-rw-r--r--drivers/gpio/gpio-davinci.c6
-rw-r--r--drivers/gpio/gpio-max732x.c1
-rw-r--r--drivers/gpio/gpio-omap.c5
-rw-r--r--drivers/gpio/gpio-pca953x.c4
-rw-r--r--drivers/gpio/gpio-xilinx.c4
-rw-r--r--drivers/gpio/gpio-zynq.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c5
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c121
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c14
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c12
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c20
-rw-r--r--drivers/gpu/drm/drm_ioc32.c60
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c21
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c2
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c35
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c43
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c29
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c2
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c13
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h2
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c45
-rw-r--r--drivers/gpu/drm/i915/intel_display.c77
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c35
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c11
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c21
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c33
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c87
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h1
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c26
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c336
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c62
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c392
-rw-r--r--drivers/gpu/drm/radeon/ni.c25
-rw-r--r--drivers/gpu/drm/radeon/r600.c155
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c204
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c109
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c66
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c40
-rw-r--r--drivers/gpu/drm/radeon/si.c336
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c67
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c49
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c3
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-cp2112.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom_sys.c76
-rw-r--r--drivers/hid/wacom_wac.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c18
-rw-r--r--drivers/hwmon/g762.c1
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c58
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c15
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c1
-rw-r--r--drivers/i2c/i2c-core.c40
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c6
-rw-r--r--drivers/iio/accel/bmc150-accel.c2
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/adc/Kconfig3
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c4
-rw-r--r--drivers/iio/adc/twl4030-madc.c3
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c11
-rw-r--r--drivers/iio/dac/ad5624r_spi.c4
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c18
-rw-r--r--drivers/iio/light/Kconfig2
-rw-r--r--drivers/iio/light/cm3323.c2
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/stk3310.c75
-rw-r--r--drivers/iio/light/tcs3414.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c4
-rw-r--r--drivers/iio/magnetometer/mmc35240.c47
-rw-r--r--drivers/iio/proximity/sx9500.c28
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/iio/temperature/tmp006.c3
-rw-r--r--drivers/infiniband/core/agent.c4
-rw-r--r--drivers/infiniband/core/cm.c61
-rw-r--r--drivers/infiniband/core/iwpm_msg.c33
-rw-r--r--drivers/infiniband/core/iwpm_util.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.h28
-rw-r--r--drivers/infiniband/core/mad.c47
-rw-r--r--drivers/infiniband/core/multicast.c8
-rw-r--r--drivers/infiniband/core/opa_smi.h4
-rw-r--r--drivers/infiniband/core/sa_query.c8
-rw-r--r--drivers/infiniband/core/smi.c37
-rw-r--r--drivers/infiniband/core/smi.h4
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/infiniband/core/ucm.c4
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c6
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c5
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c34
-rw-r--r--drivers/infiniband/hw/mlx4/main.c33
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c58
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c56
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h53
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h29
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c33
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c49
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c21
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c16
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c23
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c71
-rw-r--r--drivers/input/input-leds.c16
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c1
-rw-r--r--drivers/input/misc/drv260x.c6
-rw-r--r--drivers/input/misc/drv2665.c2
-rw-r--r--drivers/input/misc/drv2667.c4
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/bcm5974.c165
-rw-r--r--drivers/input/mouse/elan_i2c_core.c12
-rw-r--r--drivers/input/mouse/elantech.c35
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/synaptics.c6
-rw-r--r--drivers/input/touchscreen/goodix.c36
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/iommu/amd_iommu.c98
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/amd_iommu_v2.c24
-rw-r--r--drivers/iommu/arm-smmu-v3.c60
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/irqchip/irq-crossbar.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c111
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c12
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c35
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bcache/closure.h3
-rw-r--r--drivers/md/bcache/io.c1
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c14
-rw-r--r--drivers/md/bitmap.c28
-rw-r--r--drivers/md/dm-cache-policy-mq.c2
-rw-r--r--drivers/md/dm-cache-policy-smq.c4
-rw-r--r--drivers/md/dm-cache-target.c37
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin.c53
-rw-r--r--drivers/md/dm.c39
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c23
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c9
-rw-r--r--drivers/md/raid1.c19
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/md/raid5.h3
-rw-r--r--drivers/media/dvb-frontends/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/Kconfig1
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c15
-rw-r--r--drivers/media/pci/mantis/mantis_dma.c5
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c116
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c122
-rw-r--r--drivers/media/rc/nuvoton-cir.c127
-rw-r--r--drivers/media/rc/nuvoton-cir.h1
-rw-r--r--drivers/media/rc/rc-core-priv.h36
-rw-r--r--drivers/media/rc/rc-ir-raw.c139
-rw-r--r--drivers/media/rc/rc-loopback.c36
-rw-r--r--drivers/media/rc/rc-main.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c40
-rw-r--r--drivers/memory/omap-gpmc.c14
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/arizona-core.c18
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/wm5102-tables.c6
-rw-r--r--drivers/mfd/wm5110-tables.c6
-rw-r--r--drivers/mfd/wm8994-core.c8
-rw-r--r--drivers/mfd/wm8997-tables.c2
-rw-r--r--drivers/misc/cxl/api.c12
-rw-r--r--drivers/misc/cxl/context.c14
-rw-r--r--drivers/misc/cxl/main.c2
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/vphb.c3
-rw-r--r--drivers/misc/eeprom/at24.c3
-rw-r--r--drivers/misc/mei/bus.c16
-rw-r--r--drivers/misc/mei/init.c2
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mei/nfc.c3
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c15
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c11
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c210
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c16
-rw-r--r--drivers/net/bonding/bond_main.c86
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/bfin_can.c6
-rw-r--r--drivers/net/can/c_can/c_can.c10
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c7
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/rcar_can.c16
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/spi/mcp251x.c17
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c7
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c6
-rw-r--r--drivers/net/can/vcan.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c15
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c21
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-desc.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c17
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h3
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c125
-rw-r--r--drivers/net/ethernet/cadence/macb.h34
-rw-r--r--drivers/net/ethernet/cavium/Kconfig3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c26
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c55
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c17
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c28
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c191
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c99
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c109
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c350
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c74
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1
-rw-r--r--drivers/net/ethernet/sfc/ef10.c172
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c59
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c14
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/tx.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c34
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c51
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c30
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/hamradio/bpqether.c1
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/ipvlan/ipvlan.h9
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c42
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/ntb_netdev.c9
-rw-r--r--drivers/net/phy/Kconfig2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/mdio_bus.c19
-rw-r--r--drivers/net/phy/phy.c16
-rw-r--r--drivers/net/phy/smsc.c31
-rw-r--r--drivers/net/ppp/ppp_generic.c78
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c63
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c191
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/wan/cosa.c3
-rw-r--r--drivers/net/wan/z85230.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h51
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c414
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c74
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c15
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c68
-rw-r--r--drivers/ntb/ntb.c2
-rw-r--r--drivers/ntb/ntb_transport.c201
-rw-r--r--drivers/nvdimm/bus.c11
-rw-r--r--drivers/nvdimm/region_devs.c5
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/unittest.c3
-rw-r--r--drivers/parport/share.c11
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-berlin-usb.c4
-rw-r--r--drivers/phy/phy-sun4i-usb.c1
-rw-r--r--drivers/phy/phy-ti-pipe3.c217
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c1
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/platform/x86/dell-laptop.c171
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c83
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/pnp/system.c35
-rw-r--r--drivers/regulator/88pm800.c2
-rw-r--r--drivers/regulator/core.c19
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c4
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/dasd.c36
-rw-r--r--drivers/s390/block/dasd_alias.c3
-rw-r--r--drivers/s390/char/sclp_early.c1
-rw-r--r--drivers/s390/crypto/zcrypt_api.c7
-rw-r--r--drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile) 0
-rw-r--r--drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c) 0
-rw-r--r--drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c) 0
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/ipr.c28
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c19
-rw-r--r--drivers/scsi/libiscsi.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c190
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c763
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h72
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c28
-rw-r--r--drivers/scsi/scsi_error.c33
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_pm.c22
-rw-r--r--drivers/scsi/scsi_sysfs.c2
-rw-r--r--drivers/scsi/scsi_transport_srp.c3
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/board/Kconfig2
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c2
-rw-r--r--drivers/staging/vt6655/device_main.c7
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c52
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c34
-rw-r--r--drivers/target/target_core_configfs.c49
-rw-r--r--drivers/target/target_core_hba.c10
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_spc.c53
-rw-r--r--drivers/thermal/cpu_cooling.c73
-rw-r--r--drivers/thermal/hisi_thermal.c1
-rw-r--r--drivers/thermal/power_allocator.c34
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/tty/n_tty.c16
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/etraxfs-uart.c2
-rw-r--r--drivers/tty/serial/imx.c15
-rw-r--r--drivers/tty/serial/sc16is7xx.c30
-rw-r--r--drivers/tty/serial/serial_core.c3
-rw-r--r--drivers/tty/vt/selection.c1
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/chipidea/core.c13
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c55
-rw-r--r--drivers/usb/dwc2/core.h9
-rw-r--r--drivers/usb/dwc2/hcd.c55
-rw-r--r--drivers/usb/dwc2/hcd.h5
-rw-r--r--drivers/usb/dwc2/hcd_queue.c49
-rw-r--r--drivers/usb/dwc3/core.c6
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/gadget/composite.c11
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c16
-rw-r--r--drivers/usb/gadget/function/f_midi.c4
-rw-r--r--drivers/usb/gadget/function/f_printer.c10
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c3
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/udc-core.c15
-rw-r--r--drivers/usb/host/ohci-q.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/xhci-hub.c22
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-pci.c57
-rw-r--r--drivers/usb/host/xhci-ring.c5
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/musb/musb_virthub.c4
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c3
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/mos7720.c253
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/usb-serial.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h23
-rw-r--r--drivers/vfio/vfio.c91
-rw-r--r--drivers/vhost/vhost.c65
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/stifb.c40
-rw-r--r--drivers/video/of_videomode.c4
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/xen/balloon.c15
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--fs/9p/vfs_inode.c3
-rw-r--r--fs/9p/vfs_inode_dotl.c3
-rw-r--r--fs/btrfs/btrfs_inode.h2
-rw-r--r--fs/btrfs/ctree.h1
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c44
-rw-r--r--fs/btrfs/extent-tree.c34
-rw-r--r--fs/btrfs/inode-map.c17
-rw-r--r--fs/btrfs/inode.c94
-rw-r--r--fs/btrfs/ioctl.c259
-rw-r--r--fs/btrfs/ordered-data.c5
-rw-r--r--fs/btrfs/qgroup.c54
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c39
-rw-r--r--fs/btrfs/transaction.c7
-rw-r--r--fs/btrfs/tree-log.c226
-rw-r--r--fs/btrfs/volumes.c50
-rw-r--r--fs/ceph/caps.c22
-rw-r--r--fs/ceph/locks.c2
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/compat_ioctl.c1
-rw-r--r--fs/configfs/item.c4
-rw-r--r--fs/dax.c14
-rw-r--r--fs/dcache.c20
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/inode.c22
-rw-r--r--fs/ext4/ioctl.c1
-rw-r--r--fs/ext4/mballoc.c16
-rw-r--r--fs/ext4/migrate.c17
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/f2fs/file.c7
-rw-r--r--fs/f2fs/gc.c30
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/segment.c1
-rw-r--r--fs/file_table.c24
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/hpfs/alloc.c95
-rw-r--r--fs/hpfs/dir.c1
-rw-r--r--fs/hpfs/file.c1
-rw-r--r--fs/hpfs/hpfs_fn.h4
-rw-r--r--fs/hpfs/super.c47
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/jfs/file.c2
-rw-r--r--fs/jfs/inode.c4
-rw-r--r--fs/jfs/ioctl.c3
-rw-r--r--fs/jfs/namei.c27
-rw-r--r--fs/locks.c38
-rw-r--r--fs/namei.c9
-rw-r--r--fs/namespace.c42
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c2
-rw-r--r--fs/nfs/inode.c15
-rw-r--r--fs/nfs/internal.h21
-rw-r--r--fs/nfs/nfs42proc.c19
-rw-r--r--fs/nfs/nfs4proc.c54
-rw-r--r--fs/nfs/nfs4state.c29
-rw-r--r--fs/nfs/pagelist.c7
-rw-r--r--fs/nfs/pnfs.c101
-rw-r--r--fs/nfs/write.c15
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4state.c12
-rw-r--r--fs/nfsd/nfs4xdr.c11
-rw-r--r--fs/nilfs2/ioctl.c1
-rw-r--r--fs/notify/mark.c30
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/dlmglue.c10
-rw-r--r--fs/ocfs2/ioctl.c1
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/proc/Kconfig6
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/kcore.c4
-rw-r--r--fs/signalfd.c5
-rw-r--r--fs/udf/inode.c19
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c44
-rw-r--r--fs/xfs/xfs_file.c21
-rw-r--r--fs/xfs/xfs_log_recover.c11
-rw-r--r--include/asm-generic/mm-arch-hooks.h16
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_edid.h19
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/acpi.h24
-rw-r--r--include/linux/amba/sp810.h2
-rw-r--r--include/linux/ata.h19
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/buffer_head.h7
-rw-r--r--include/linux/can/skb.h2
-rw-r--r--include/linux/ceph/messenger.h3
-rw-r--r--include/linux/clkdev.h7
-rw-r--r--include/linux/compat.h2
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/configfs.h3
-rw-r--r--include/linux/cper.h22
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/device.h15
-rw-r--r--include/linux/fs.h35
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/gpio/driver.h2
-rw-r--r--include/linux/hid-sensor-hub.h1
-rw-r--r--include/linux/hugetlb.h17
-rw-r--r--include/linux/init.h78
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqdesc.h7
-rw-r--r--include/linux/kernel.h9
-rw-r--r--include/linux/kobject.h5
-rw-r--r--include/linux/kvm_host.h18
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/mm.h28
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmiotrace.h2
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h84
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/nfs_fs.h7
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/of_device.h2
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/page_owner.h13
-rw-r--r--include/linux/pata_arasan_cf_data.h2
-rw-r--r--include/linux/platform_data/macb.h14
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/printk.h6
-rw-r--r--include/linux/regmap.h28
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h4
-rw-r--r--include/linux/sched.h16
-rw-r--r--include/linux/skbuff.h20
-rw-r--r--include/linux/tick.h7
-rw-r--r--include/linux/timekeeping.h1
-rw-r--r--include/linux/usb/cdc_ncm.h7
-rw-r--r--include/media/rc-core.h7
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/act_api.h8
-rw-r--r--include/net/cfg80211.h17
-rw-r--r--include/net/inet_frag.h17
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/sock.h2
-rw-r--r--include/rdma/ib_verbs.h20
-rw-r--r--include/scsi/scsi_eh.h1
-rw-r--r--include/scsi/scsi_transport_srp.h1
-rw-r--r--include/sound/ac97_codec.h2
-rw-r--r--include/sound/rcar_snd.h14
-rw-r--r--include/sound/soc-dapm.h84
-rw-r--r--include/sound/soc-topology.h12
-rw-r--r--include/sound/soc.h29
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/trace/events/asoc.h53
-rw-r--r--include/uapi/drm/amdgpu_drm.h4
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/virtio_net.h16
-rw-r--r--include/uapi/linux/virtio_pci.h6
-rw-r--r--include/uapi/linux/virtio_ring.h5
-rw-r--r--include/uapi/sound/asoc.h45
-rw-r--r--init/main.c2
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/sem.c47
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/auditsc.c3
-rw-r--r--kernel/cpu.c13
-rw-r--r--kernel/cpuset.c2
-rw-r--r--kernel/events/core.c99
-rw-r--r--kernel/events/internal.h10
-rw-r--r--kernel/events/ring_buffer.c37
-rw-r--r--kernel/fork.c7
-rw-r--r--kernel/irq/chip.c19
-rw-r--r--kernel/irq/internals.h4
-rw-r--r--kernel/irq/resend.c18
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/locking/qspinlock_paravirt.h11
-rw-r--r--kernel/module.c9
-rw-r--r--kernel/resource.c6
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/signal.c13
-rw-r--r--kernel/time/clockevents.c24
-rw-r--r--kernel/time/tick-broadcast.c164
-rw-r--r--kernel/time/tick-common.c22
-rw-r--r--kernel/time/tick-sched.h10
-rw-r--r--kernel/time/timer.c4
-rw-r--r--kernel/trace/ftrace.c52
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_branch.c17
-rw-r--r--lib/Kconfig.kasan4
-rw-r--r--lib/decompress.c5
-rw-r--r--lib/dma-debug.c3
-rw-r--r--lib/hexdump.c7
-rw-r--r--lib/iommu-common.c2
-rw-r--r--lib/kobject.c5
-rw-r--r--lib/rhashtable.c4
-rw-r--r--mm/cma.h2
-rw-r--r--mm/cma_debug.c11
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory-failure.c54
-rw-r--r--mm/memory.c20
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c76
-rw-r--r--mm/page_owner.c7
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/slub.c2
-rw-r--r--mm/vmscan.c16
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/ax25/ax25_subr.c1
-rw-r--r--net/batman-adv/distributed-arp-table.c18
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/translation-table.c32
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/br_forward.c28
-rw-r--r--net/bridge/br_mdb.c18
-rw-r--r--net/bridge/br_multicast.c91
-rw-r--r--net/bridge/br_netfilter_hooks.c16
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/bridge/br_netlink.c16
-rw-r--r--net/bridge/br_stp.c5
-rw-r--r--net/bridge/br_stp_if.c13
-rw-r--r--net/bridge/br_stp_timer.c4
-rw-r--r--net/caif/caif_socket.c19
-rw-r--r--net/can/af_can.c12
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/raw.c7
-rw-r--r--net/ceph/ceph_common.c16
-rw-r--r--net/ceph/messenger.c24
-rw-r--r--net/core/datagram.c57
-rw-r--r--net/core/dev.c45
-rw-r--r--net/core/dst.c4
-rw-r--r--net/core/gen_estimator.c13
-rw-r--r--net/core/netclassid_cgroup.c3
-rw-r--r--net/core/pktgen.c12
-rw-r--r--net/core/request_sock.c8
-rw-r--r--net/core/rtnetlink.c198
-rw-r--r--net/core/skbuff.c39
-rw-r--r--net/core/sock.c8
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dsa/dsa.c6
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ieee802154/6lowpan/reassembly.c6
-rw-r--r--net/ipv4/arp.c16
-rw-r--r--net/ipv4/datagram.c16
-rw-r--r--net/ipv4/devinet.c14
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_semantics.c41
-rw-r--r--net/ipv4/fib_trie.c9
-rw-r--r--net/ipv4/igmp.c33
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_fragment.c40
-rw-r--r--net/ipv4/inet_hashtables.c11
-rw-r--r--net/ipv4/ip_fragment.c18
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/netfilter/arp_tables.c25
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c3
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c10
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/mcast_snoop.c33
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c19
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c6
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c90
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/debugfs_netdev.c1
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/mesh_plink.c5
-rw-r--r--net/mac80211/pm.c16
-rw-r--r--net/mac80211/rc80211_minstrel.c11
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c78
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c41
-rw-r--r--net/netfilter/nf_conntrack_core.c71
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_queue.c2
-rw-r--r--net/netfilter/nf_synproxy_core.c11
-rw-r--r--net/netfilter/nfnetlink.c38
-rw-r--r--net/netfilter/xt_CT.c13
-rw-r--r--net/netfilter/xt_IDLETIMER.c1
-rw-r--r--net/netlink/af_netlink.c86
-rw-r--r--net/openvswitch/actions.c16
-rw-r--r--net/openvswitch/flow_table.c2
-rw-r--r--net/packet/af_packet.c11
-rw-r--r--net/rds/ib_rdma.c4
-rw-r--r--net/rds/info.c2
-rw-r--r--net/rds/transport.c2
-rw-r--r--net/sched/act_api.c11
-rw-r--r--net/sched/act_bpf.c50
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/cls_bpf.c2
-rw-r--r--net/sched/cls_flow.c5
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/sch_choke.c13
-rw-r--r--net/sched/sch_fq_codel.c35
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/sunrpc/backchannel_rqst.c6
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/xprtsock.c25
-rw-r--r--net/switchdev/switchdev.c12
-rw-r--r--net/tipc/socket.c1
-rw-r--r--net/wireless/chan.c45
-rw-r--r--net/wireless/nl80211.c14
-rw-r--r--net/wireless/reg.c8
-rw-r--r--net/wireless/trace.h11
-rw-r--r--samples/trace_events/trace-events-sample.h7
-rwxr-xr-xscripts/checkpatch.pl2
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rw-r--r--scripts/mod/devicetable-offsets.c2
-rw-r--r--scripts/mod/file2alias.c32
-rw-r--r--scripts/mod/modpost.c3
-rw-r--r--security/keys/keyring.c8
-rw-r--r--security/selinux/hooks.c3
-rw-r--r--security/selinux/ss/ebitmap.c6
-rw-r--r--security/yama/yama_lsm.c1
-rw-r--r--sound/ac97_bus.c62
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/firewire/amdtp.c5
-rw-r--r--sound/firewire/amdtp.h2
-rw-r--r--sound/firewire/fireworks/fireworks.c8
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c9
-rw-r--r--sound/hda/ext/hdac_ext_controller.c6
-rw-r--r--sound/hda/ext/hdac_ext_stream.c2
-rw-r--r--sound/hda/hdac_i915.c5
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/hda_intel.c32
-rw-r--r--sound/pci/hda/patch_cirrus.c4
-rw-r--r--sound/pci/hda/patch_hdmi.c4
-rw-r--r--sound/pci/hda/patch_realtek.c137
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c2
-rw-r--r--sound/soc/Kconfig4
-rw-r--r--sound/soc/Makefile4
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c2
-rw-r--r--sound/soc/au1x/dbdma2.c11
-rw-r--r--sound/soc/au1x/dma.c11
-rw-r--r--sound/soc/au1x/psc-i2s.c16
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c2
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c10
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c10
-rw-r--r--sound/soc/blackfin/bfin-eval-adau1x61.c1
-rw-r--r--sound/soc/codecs/88pm860x-codec.c4
-rw-r--r--sound/soc/codecs/Kconfig18
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/ad1980.c36
-rw-r--r--sound/soc/codecs/adav80x.c3
-rw-r--r--sound/soc/codecs/ak4642.c33
-rw-r--r--sound/soc/codecs/alc5632.c2
-rw-r--r--sound/soc/codecs/arizona.c129
-rw-r--r--sound/soc/codecs/arizona.h20
-rw-r--r--sound/soc/codecs/cs35l32.c59
-rw-r--r--sound/soc/codecs/cs35l32.h2
-rw-r--r--sound/soc/codecs/cs4265.c28
-rw-r--r--sound/soc/codecs/cs42l52.c57
-rw-r--r--sound/soc/codecs/cs42l56.c49
-rw-r--r--sound/soc/codecs/cs42l73.c100
-rw-r--r--sound/soc/codecs/cs4349.c392
-rw-r--r--sound/soc/codecs/cs4349.h136
-rw-r--r--sound/soc/codecs/da7210.c6
-rw-r--r--sound/soc/codecs/da7213.c2
-rw-r--r--sound/soc/codecs/da732x.c14
-rw-r--r--sound/soc/codecs/da9055.c3
-rw-r--r--sound/soc/codecs/gtm601.c95
-rw-r--r--sound/soc/codecs/ics43432.c76
-rw-r--r--sound/soc/codecs/isabelle.c10
-rw-r--r--sound/soc/codecs/lm49453.c18
-rw-r--r--sound/soc/codecs/max9768.c63
-rw-r--r--sound/soc/codecs/max98088.c305
-rw-r--r--sound/soc/codecs/max98088.h2
-rw-r--r--sound/soc/codecs/max98090.c128
-rw-r--r--sound/soc/codecs/max98090.h1
-rw-r--r--sound/soc/codecs/max98095.c318
-rw-r--r--sound/soc/codecs/max98357a.c25
-rw-r--r--sound/soc/codecs/max9877.c18
-rw-r--r--sound/soc/codecs/max98925.c3
-rw-r--r--sound/soc/codecs/mc13783.c6
-rw-r--r--sound/soc/codecs/ml26124.c2
-rw-r--r--sound/soc/codecs/pcm1681.c15
-rw-r--r--sound/soc/codecs/rl6231.c104
-rw-r--r--sound/soc/codecs/rl6231.h1
-rw-r--r--sound/soc/codecs/rt286.c10
-rw-r--r--sound/soc/codecs/rt298.c2
-rw-r--r--sound/soc/codecs/rt5640.c51
-rw-r--r--sound/soc/codecs/rt5645.c438
-rw-r--r--sound/soc/codecs/rt5645.h31
-rw-r--r--sound/soc/codecs/rt5651.c11
-rw-r--r--sound/soc/codecs/rt5670.c13
-rw-r--r--sound/soc/codecs/rt5677-spi.c233
-rw-r--r--sound/soc/codecs/rt5677-spi.h8
-rw-r--r--sound/soc/codecs/rt5677.c151
-rw-r--r--sound/soc/codecs/rt5677.h5
-rw-r--r--sound/soc/codecs/sgtl5000.h2
-rw-r--r--sound/soc/codecs/si476x.c2
-rw-r--r--sound/soc/codecs/sirf-audio-codec.c4
-rw-r--r--sound/soc/codecs/ssm4567.c41
-rw-r--r--sound/soc/codecs/sta529.c3
-rw-r--r--sound/soc/codecs/stac9766.c57
-rw-r--r--sound/soc/codecs/sti-sas.c628
-rw-r--r--sound/soc/codecs/tas2552.c4
-rw-r--r--sound/soc/codecs/tas571x.c2
-rw-r--r--sound/soc/codecs/tfa9879.c2
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c2
-rw-r--r--sound/soc/codecs/wm2200.c6
-rw-r--r--sound/soc/codecs/wm5100.c4
-rw-r--r--sound/soc/codecs/wm5102.c27
-rw-r--r--sound/soc/codecs/wm5110.c30
-rw-r--r--sound/soc/codecs/wm8510.c1
-rw-r--r--sound/soc/codecs/wm8523.c1
-rw-r--r--sound/soc/codecs/wm8580.c1
-rw-r--r--sound/soc/codecs/wm8962.c6
-rw-r--r--sound/soc/codecs/wm8993.c4
-rw-r--r--sound/soc/codecs/wm8994.c18
-rw-r--r--sound/soc/codecs/wm8996.c2
-rw-r--r--sound/soc/codecs/wm8997.c20
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/codecs/wm9705.c40
-rw-r--r--sound/soc/codecs/wm9712.c45
-rw-r--r--sound/soc/codecs/wm9713.c48
-rw-r--r--sound/soc/codecs/wm9713.h2
-rw-r--r--sound/soc/davinci/davinci-i2s.c25
-rw-r--r--sound/soc/davinci/davinci-mcasp.c18
-rw-r--r--sound/soc/davinci/davinci-vcif.c14
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c16
-rw-r--r--sound/soc/fsl/fsl_asrc.c25
-rw-r--r--sound/soc/fsl/fsl_esai.c2
-rw-r--r--sound/soc/fsl/fsl_sai.c2
-rw-r--r--sound/soc/fsl/fsl_sai.h15
-rw-r--r--sound/soc/fsl/fsl_spdif.c25
-rw-r--r--sound/soc/fsl/fsl_ssi.c70
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c25
-rw-r--r--sound/soc/fsl/imx-pcm.h9
-rw-r--r--sound/soc/fsl/imx-ssi.c2
-rw-r--r--sound/soc/generic/simple-card.c9
-rw-r--r--sound/soc/intel/Kconfig29
-rw-r--r--sound/soc/intel/Makefile3
-rw-r--r--sound/soc/intel/atom/sst-atom-controls.c6
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c1
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform.h1
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c23
-rw-r--r--sound/soc/intel/atom/sst/sst_ipc.c3
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c2
-rw-r--r--sound/soc/intel/boards/byt-max98090.c1
-rw-r--r--sound/soc/intel/boards/byt-rt5640.c1
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c1
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c23
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c1
-rw-r--r--sound/soc/intel/common/sst-dsp-priv.h23
-rw-r--r--sound/soc/intel/common/sst-dsp.c71
-rw-r--r--sound/soc/intel/common/sst-dsp.h6
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/intel/skylake/Makefile9
-rw-r--r--sound/soc/intel/skylake/skl-messages.c884
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c140
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.h106
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c916
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.c327
-rw-r--r--sound/soc/intel/skylake/skl-sst-cldma.h251
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c342
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h145
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c771
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.h125
-rw-r--r--sound/soc/intel/skylake/skl-sst.c280
-rw-r--r--sound/soc/intel/skylake/skl-topology.h286
-rw-r--r--sound/soc/intel/skylake/skl-tplg-interface.h88
-rw-r--r--sound/soc/intel/skylake/skl.c536
-rw-r--r--sound/soc/intel/skylake/skl.h84
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c4
-rw-r--r--sound/soc/mediatek/mt8173-max98090.c19
-rw-r--r--sound/soc/mediatek/mt8173-rt5650-rt5676.c21
-rw-r--r--sound/soc/mediatek/mtk-afe-common.h8
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c91
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c9
-rw-r--r--sound/soc/omap/mcbsp.c20
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c10
-rw-r--r--sound/soc/omap/omap3pandora.c6
-rw-r--r--sound/soc/pxa/mmp-pcm.c9
-rw-r--r--sound/soc/pxa/pxa-ssp.c11
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c11
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c9
-rw-r--r--sound/soc/qcom/Kconfig7
-rw-r--r--sound/soc/qcom/lpass-cpu.c2
-rw-r--r--sound/soc/qcom/lpass-ipq806x.c2
-rw-r--r--sound/soc/qcom/lpass.h2
-rw-r--r--sound/soc/rockchip/Kconfig19
-rw-r--r--sound/soc/rockchip/Makefile6
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c8
-rw-r--r--sound/soc/rockchip/rockchip_max98090.c236
-rw-r--r--sound/soc/rockchip/rockchip_rt5645.c225
-rw-r--r--sound/soc/samsung/arndale_rt5631.c11
-rw-r--r--sound/soc/samsung/snow.c1
-rw-r--r--sound/soc/sh/dma-sh7760.c9
-rw-r--r--sound/soc/sh/fsi.c1
-rw-r--r--sound/soc/sh/rcar/Makefile2
-rw-r--r--sound/soc/sh/rcar/core.c195
-rw-r--r--sound/soc/sh/rcar/ctu.c171
-rw-r--r--sound/soc/sh/rcar/dma.c128
-rw-r--r--sound/soc/sh/rcar/dvc.c73
-rw-r--r--sound/soc/sh/rcar/gen.c33
-rw-r--r--sound/soc/sh/rcar/mix.c200
-rw-r--r--sound/soc/sh/rcar/rsnd.h111
-rw-r--r--sound/soc/sh/rcar/rsrc-card.c22
-rw-r--r--sound/soc/sh/rcar/src.c117
-rw-r--r--sound/soc/sh/rcar/ssi.c4
-rw-r--r--sound/soc/sh/ssi.c12
-rw-r--r--sound/soc/soc-ac97.c30
-rw-r--r--sound/soc/soc-core.c87
-rw-r--r--sound/soc/soc-dapm.c538
-rw-r--r--sound/soc/soc-pcm.c16
-rw-r--r--sound/soc/soc-topology.c87
-rw-r--r--sound/soc/spear/spdif_in.c20
-rw-r--r--sound/soc/spear/spear_pcm.c2
-rw-r--r--sound/soc/sti/Kconfig11
-rw-r--r--sound/soc/sti/Makefile4
-rw-r--r--sound/soc/sti/sti_uniperif.c254
-rw-r--r--sound/soc/sti/uniperif.h1229
-rw-r--r--sound/soc/sti/uniperif_player.c1110
-rw-r--r--sound/soc/sti/uniperif_reader.c362
-rw-r--r--sound/soc/zte/zx296702-i2s.c4
-rw-r--r--sound/soc/zte/zx296702-spdif.c4
-rw-r--r--sound/sparc/amd7930.c1
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/line6/pcm.c9
-rw-r--r--sound/usb/mixer_maps.c24
-rw-r--r--sound/usb/quirks-table.h68
-rw-r--r--tools/include/linux/compiler.h58
-rw-r--r--tools/include/linux/export.h10
-rw-r--r--tools/include/linux/rbtree.h104
-rw-r--r--tools/include/linux/rbtree_augmented.h245
-rw-r--r--tools/lib/api/Makefile2
-rw-r--r--tools/lib/hweight.c62
-rw-r--r--tools/lib/rbtree.c548
-rw-r--r--tools/lib/traceevent/Makefile2
-rw-r--r--tools/perf/MANIFEST8
-rw-r--r--tools/perf/Makefile.perf19
-rw-r--r--tools/perf/builtin-record.c11
-rw-r--r--tools/perf/builtin-stat.c4
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/util/Build4
-rw-r--r--tools/perf/util/auxtrace.c10
-rw-r--r--tools/perf/util/include/linux/rbtree.h16
-rw-r--r--tools/perf/util/include/linux/rbtree_augmented.h2
-rw-r--r--tools/perf/util/machine.c20
-rw-r--r--tools/perf/util/python-ext-sources4
-rw-r--r--tools/perf/util/stat-shadow.c8
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/perf/util/symbol.h3
-rw-r--r--tools/perf/util/thread.c6
-rw-r--r--tools/perf/util/thread_map.c3
-rw-r--r--tools/perf/util/vdso.c8
-rw-r--r--tools/testing/nvdimm/Kbuild3
-rw-r--r--tools/testing/nvdimm/test/iomap.c27
-rw-r--r--tools/testing/nvdimm/test/nfit.c52
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c2
-rw-r--r--virt/kvm/vfio.c5
1767 files changed, 33301 insertions, 14005 deletions
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 000000000000..cca6d870f7a5
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
diff --git a/.mailmap b/.mailmap
index 977f958eedbe..4b31af54ccd5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -116,6 +117,7 @@ Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
@@ -125,7 +127,9 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
-Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index bbed111c31b4..70c9b1ac66db 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1234,10 +1234,8 @@ Description:
  object is near the sensor, usually be observing
  reflectivity of infrared or ultrasound emitted.
  Often these sensors are unit less and as such conversion
- to SI units is not possible. Where it is, the units should
- be meters. If such a conversion is not possible, the reported
- values should behave in the same way as a distance, i.e. lower
- values indicate something is closer to the sensor.
+ to SI units is not possible. Higher proximity measurements
+ indicate closer objects, and vice versa.
 
 What: /sys/.../iio:deviceX/in_illuminance_input
 What: /sys/.../iio:deviceX/in_illuminance_raw
diff --git a/Documentation/DocBook/alsa-driver-api.tmpl b/Documentation/DocBook/alsa-driver-api.tmpl
index 71f9246127ec..e94a10bb4a9e 100644
--- a/Documentation/DocBook/alsa-driver-api.tmpl
+++ b/Documentation/DocBook/alsa-driver-api.tmpl
@@ -108,7 +108,7 @@
  <sect1><title>ASoC Core API</title>
 !Iinclude/sound/soc.h
 !Esound/soc/soc-core.c
-!Esound/soc/soc-cache.c
+<!-- !Esound/soc/soc-cache.c no docbook comments here -->
 !Esound/soc/soc-devres.c
 !Esound/soc/soc-io.c
 !Esound/soc/soc-pcm.c
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index c0312cbd023d..2fb9a5457522 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev)
  <td valign="top" >TBD</td>
  </tr>
  <tr>
- <td rowspan="2" valign="top" >omap</td>
+ <td valign="top" >omap</td>
  <td valign="top" >Generic</td>
  <td valign="top" >“zorder”</td>
  <td valign="top" >RANGE</td>
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 65610bf52ebf..1b049be6c84f 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -60,4 +60,4 @@ Introduction
  Document Author
  ---------------
 
- Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
+ Viresh Kumar <vireshk@kernel.org>, (c) 2010-2012 ST Microelectronics
diff --git a/Documentation/arm/sunxi/README b/Documentation/arm/sunxi/README
index 1fe2d7fd4108..5e38e1582f95 100644
--- a/Documentation/arm/sunxi/README
+++ b/Documentation/arm/sunxi/README
@@ -36,7 +36,7 @@ SunXi family
  + User Manual
  http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
 
- - Allwinner A23
+ - Allwinner A23 (sun8i)
  + Datasheet
  http://dl.linux-sunxi.org/A23/A23%20Datasheet%20V1.0%2020130830.pdf
  + User Manual
@@ -55,7 +55,23 @@ SunXi family
  + User Manual
  http://dl.linux-sunxi.org/A31/A3x_release_document/A31s/IC/A31s%20User%20Manual%20%20V1.0%2020130322.pdf
 
+ - Allwinner A33 (sun8i)
+ + Datasheet
+ http://dl.linux-sunxi.org/A33/A33%20Datasheet%20release%201.1.pdf
+ + User Manual
+ http://dl.linux-sunxi.org/A33/A33%20user%20manual%20release%201.1.pdf
+
+ - Allwinner H3 (sun8i)
+ + Datasheet
+ http://dl.linux-sunxi.org/H3/Allwinner_H3_Datasheet_V1.0.pdf
+
  * Quad ARM Cortex-A15, Quad ARM Cortex-A7 based SoCs
  - Allwinner A80
  + Datasheet
  http://dl.linux-sunxi.org/A80/A80_Datasheet_Revision_1.0_0404.pdf
+
+ * Octa ARM Cortex-A7 based SoCs
+ - Allwinner A83T
+ + Not Supported
+ + Datasheet
+ http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 82960cffbad3..785eab87aa71 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -258,6 +258,12 @@ cache metadata mode : ro if read-only, rw if read-write
  no further I/O will be permitted and the status will just
  contain the string 'Fail'. The userspace recovery tools
  should then be used.
+needs_check : 'needs_check' if set, '-' if not set
+ A metadata operation has failed, resulting in the needs_check
+ flag being set in the metadata's superblock. The metadata
+ device must be deactivated and checked/repaired before the
+ cache can be made fully operational again. '-' indicates
+ needs_check is not set.
 
 Messages
 --------
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 4f67578b2954..1699a55b7b70 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -296,7 +296,7 @@ ii) Status
  underlying device. When this is enabled when loading the table,
  it can get disabled if the underlying device doesn't support it.
 
- ro|rw
+ ro|rw|out_of_data_space
  If the pool encounters certain types of device failures it will
  drop into a read-only metadata mode in which no changes to
  the pool metadata (like allocating new blocks) are permitted.
@@ -314,6 +314,13 @@ ii) Status
  module parameter can be used to change this timeout -- it
  defaults to 60 seconds but may be disabled using a value of 0.
 
+ needs_check
+ A metadata operation has failed, resulting in the needs_check
+ flag being set in the metadata's superblock. The metadata
+ device must be deactivated and checked/repaired before the
+ thin-pool can be made fully operational again. '-' indicates
+ needs_check is not set.
+
 iii) Messages
 
  create_thin <dev id>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8..91e6e5c478d0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
  "qcom,kpss-acc-v1"
  "qcom,kpss-acc-v2"
  "rockchip,rk3066-smp"
+ "ste,dbx500-smp"
 
  - cpu-release-addr
  Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt
index 42941fdefb11..67da20539540 100644
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ b/Documentation/devicetree/bindings/arm/sunxi.txt
@@ -9,4 +9,6 @@ using one of the following compatible strings:
  allwinner,sun6i-a31
  allwinner,sun7i-a20
  allwinner,sun8i-a23
+ allwinner,sun8i-a33
+ allwinner,sun8i-h3
  allwinner,sun9i-a80
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23d..c53e0b08032f 100644
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
  device_type = "dma";
  reg = <0x0 0x1f270000 0x0 0x10000>,
  <0x0 0x1f200000 0x0 0x10000>,
- <0x0 0x1b008000 0x0 0x2000>,
+ <0x0 0x1b000000 0x0 0x400000>,
  <0x0 0x1054a000 0x0 0x100>;
  interrupts = <0x0 0x82 0x4>,
  <0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
index e75f0e549fff..971c3eedb1c7 100644
--- a/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
@@ -65,8 +65,10 @@ Optional properties:
 - edid: verbatim EDID data block describing attached display.
 - ddc: phandle describing the i2c bus handling the display data
  channel
-- port: A port node with endpoint definitions as defined in
+- port@[0-1]: Port nodes with endpoint definitions as defined in
  Documentation/devicetree/bindings/media/video-interfaces.txt.
+ Port 0 is the input port connected to the IPU display interface,
+ port 1 is the output port connected to a panel.
 
 example:
 
@@ -75,9 +77,29 @@ display@di0 {
  edid = [edid-data];
  interface-pix-fmt = "rgb24";
 
- port {
+ port@0 {
+ reg = <0>;
+
  display_in: endpoint {
  remote-endpoint = <&ipu_di0_disp0>;
  };
  };
+
+ port@1 {
+ reg = <1>;
+
+ display_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+panel {
+ ...
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
 };
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
 
  NOTE: this only applies to the SMMU itself, not
  masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+ : Avoid sending CMD_PREFETCH_* commands to the SMMU.
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
index 938f8e1ba205..0db60470ebb6 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -8,6 +8,7 @@ of the EMIF IP and memory parts attached to it.
 Required properties:
 - compatible : Should be of the form "ti,emif-<ip-rev>" where <ip-rev>
  is the IP revision of the specific EMIF instance.
+ For am437x should be ti,emif-am4372.
 
 - phy-type : <u32> indicating the DDR phy type. Following are the
  allowed values
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
  "fsl,imx6sx-usdhc"
 
 Optional properties:
-- fsl,cd-controller : Indicate to use controller internal card detection
 - fsl,wp-controller : Indicate to use controller internal write protection
 - fsl,delay-line : Specify the number of delay cells for override mode.
  This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
  compatible = "fsl,imx51-esdhc";
  reg = <0x70004000 0x4000>;
  interrupts = <1>;
- fsl,cd-controller;
  fsl,wp-controller;
 };
 
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1..9cf9446eaf2e 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
  - id: If there are multiple instance of the same type, in order to
  differentiate between each instance "id" can be used (e.g., multi-lane PCIe
  PHY). If "id" is not provided, it is set to default value of '1'.
+ - syscon-pllreset: Handle to system control region that contains the
+ CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+ register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
 
 This is usually a subnode of ocp2scp to which it is connected.
 
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
  "sysclk",
  "refclk";
 };
+
+sata_phy: phy@4A096000 {
+ compatible = "ti,phy-pipe3-sata";
+ reg = <0x4A096000 0x80>, /* phy_rx */
+ <0x4A096400 0x64>, /* phy_tx */
+ <0x4A096800 0x40>; /* pll_ctrl */
+ reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+ ctrl-module = <&omap_control_sata>;
+ clocks = <&sys_clkin1>, <&sata_ref_clk>;
+ clock-names = "sysclk", "refclk";
+ syscon-pllreset = <&scm_conf 0x3fc>;
+ #phy-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/cs4349.txt b/Documentation/devicetree/bindings/sound/cs4349.txt
new file mode 100644
index 000000000000..54c117b59dba
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs4349.txt
@@ -0,0 +1,19 @@
+CS4349 audio CODEC
+
+Required properties:
+
+ - compatible : "cirrus,cs4349"
+
+ - reg : the I2C address of the device for I2C
+
+Optional properties:
+
+ - reset-gpios : a GPIO spec for the reset pin.
+
+Example:
+
+codec: cs4349@48 {
+ compatible = "cirrus,cs4349";
+ reg = <0x48>;
+ reset-gpios = <&gpio 54 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/ics43432.txt b/Documentation/devicetree/bindings/sound/ics43432.txt
new file mode 100644
index 000000000000..b02e3a6c0fef
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ics43432.txt
@@ -0,0 +1,17 @@
+Invensense ICS-43432 MEMS microphone with I2S output.
+
+There are no software configuration options for this device, indeed, the only
+host connection is the I2S interface. Apart from requirements on clock
+frequency (460 kHz to 3.379 MHz according to the data sheet) there must be
+64 clock cycles in each stereo output frame; 24 of the 32 available bits
+contain audio data. A hardware pin determines if the device outputs data
+on the left or right channel of the I2S frame.
+
+Required properties:
+ - compatible : Must be "invensense,ics43432"
+
+Example:
+
+ ics43432: ics43432 {
+ compatible = "invensense,ics43432";
+ };
diff --git a/Documentation/devicetree/bindings/sound/max98357a.txt b/Documentation/devicetree/bindings/sound/max98357a.txt
index a7a149a236e5..28645a2ff885 100644
--- a/Documentation/devicetree/bindings/sound/max98357a.txt
+++ b/Documentation/devicetree/bindings/sound/max98357a.txt
@@ -4,7 +4,11 @@ This node models the Maxim MAX98357A DAC.
 
 Required properties:
 - compatible : "maxim,max98357a"
-- sdmode-gpios : GPIO specifier for the GPIO -> DAC SDMODE pin
+
+Optional properties:
+- sdmode-gpios : GPIO specifier for the chip's SD_MODE pin.
+ If this option is not specified then driver does not manage
+ the pin state (e.g. chip is always on).
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
index 829bd26d17f8..519e97c8f1b8 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
 Required properties:
 - compatible : "mediatek,mt8173-max98090"
 - mediatek,audio-codec: the phandle of the MAX98090 audio codec
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
  sound {
  compatible = "mediatek,mt8173-max98090";
  mediatek,audio-codec = <&max98090>;
+ mediatek,platform = <&afe>;
  };
 
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index 61e98c976bd4..f205ce9e31dd 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
 Required properties:
 - compatible : "mediatek,mt8173-rt5650-rt5676"
 - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
  sound {
  compatible = "mediatek,mt8173-rt5650-rt5676";
  mediatek,audio-codec = <&rt5650 &rt5676>;
+ mediatek,platform = <&afe>;
  };
 
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index b6b3a786855f..1173395b5e5c 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -18,6 +18,12 @@ Required properties:
 - rcar_sound,src : Should contain SRC feature.
  The number of SRC subnode should be same as HW.
  see below for detail.
+- rcar_sound,ctu : Should contain CTU feature.
+ The number of CTU subnode should be same as HW.
+ see below for detail.
+- rcar_sound,mix : Should contain MIX feature.
+ The number of MIX subnode should be same as HW.
+ see below for detail.
 - rcar_sound,dvc : Should contain DVC feature.
  The number of DVC subnode should be same as HW.
  see below for detail.
@@ -90,6 +96,22 @@ rcar_sound: sound@ec500000 {
  };
  };
 
+ rcar_sound,mix {
+ mix0: mix@0 { };
+ mix1: mix@1 { };
+ };
+
+ rcar_sound,ctu {
+ ctu00: ctu@0 { };
+ ctu01: ctu@1 { };
+ ctu02: ctu@2 { };
+ ctu03: ctu@3 { };
+ ctu10: ctu@4 { };
+ ctu11: ctu@5 { };
+ ctu12: ctu@6 { };
+ ctu13: ctu@7 { };
+ };
+
  rcar_sound,src {
  src0: src@0 {
  interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
index c64155027288..962748a8d919 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
@@ -6,6 +6,7 @@ Required properties:
 
 - compatible : "renesas,rsrc-card,<board>"
  Examples with soctypes are:
+ - "renesas,rsrc-card"
  - "renesas,rsrc-card,lager"
  - "renesas,rsrc-card,koelsch"
 Optional properties:
@@ -29,6 +30,12 @@ Optional subnode properties:
 - frame-inversion : bool property. Add this if the
  dai-link uses frame clock inversion.
 - convert-rate : platform specified sampling rate convert
+- audio-prefix : see audio-routing
+- audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources.
+ use audio-prefix if some components is using same sink/sources naming.
+ it can be used if compatible was "renesas,rsrc-card";
 
 Required CPU/CODEC subnodes properties:
 
diff --git a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
new file mode 100644
index 000000000000..a805aa99ad75
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
@@ -0,0 +1,19 @@
+ROCKCHIP with MAX98090 CODEC
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-max98090"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+ connected to the CODEC
+- rockchip,audio-codec: The phandle of the MAX98090 audio codec
+- rockchip,headset-codec: The phandle of Ext chip for jack detection
+
+Example:
+
+sound {
+ compatible = "rockchip,rockchip-audio-max98090";
+ rockchip,model = "ROCKCHIP-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,audio-codec = <&max98090>;
+ rockchip,headset-codec = <&headsetcodec>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
new file mode 100644
index 000000000000..411a62b3ff41
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
@@ -0,0 +1,17 @@
+ROCKCHIP with RT5645/RT5650 CODECS
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-rt5645"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+ connected to the CODEC
+- rockchip,audio-codec: The phandle of the RT5645/RT5650 audio codec
+
+Example:
+
+sound {
+ compatible = "rockchip,rockchip-audio-rt5645";
+ rockchip,model = "ROCKCHIP-I2S";
+ rockchip,i2s-controller = <&i2s>;
+ rockchip,audio-codec = <&rt5645>;
+};
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
new file mode 100644
index 000000000000..028fa1c82f50
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -0,0 +1,155 @@
+STMicroelectronics sti ASoC cards
+
+The sti ASoC Sound Card can be used, for all sti SoCs using internal sti-sas
+codec or external codecs.
+
+sti sound drivers allows to expose sti SoC audio interface through the
+generic ASoC simple card. For details about sound card declaration please refer to
+Documentation/devicetree/bindings/sound/simple-card.txt.
+
+1) sti-uniperiph-dai: audio dai device.
+---------------------------------------
+
+Required properties:
+ - compatible: "st,sti-uni-player" or "st,sti-uni-reader"
+
+ - st,syscfg: phandle to boot-device system configuration registers
+
+ - clock-names: name of the clocks listed in clocks property in the same order
+
+ - reg: CPU DAI IP Base address and size entries, listed in same
+ order than the CPU_DAI properties.
+
+ - reg-names: names of the mapped memory regions listed in regs property in
+ the same order.
+
+ - interrupts: CPU_DAI interrupt line, listed in the same order than the
+ CPU_DAI properties.
+
+ - dma: CPU_DAI DMA controller phandle and DMA request line, listed in the same
+ order than the CPU_DAI properties.
+
+ - dma-names: identifier string for each DMA request line in the dmas property.
+ "tx" for "st,sti-uni-player" compatibility
+ "rx" for "st,sti-uni-reader" compatibility
+
+ - version: IP version integrated in SOC.
+
+ - dai-name: DAI name that describes the IP.
+
+Required properties ("st,sti-uni-player" compatibility only):
+ - clocks: CPU_DAI IP clock source, listed in the same order than the
+ CPU_DAI properties.
+
+ - uniperiph-id: internal SOC IP instance ID.
+
+ - IP mode: IP working mode depending on associated codec.
+ "HDMI" connected to HDMI codec IP and IEC HDMI formats.
+ "SPDIF"connected to SPDIF codec and support SPDIF formats.
+ "PCM" PCM standard mode for I2S or TDM bus.
+
+Optional properties:
+ - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
+ external codecs connection.
+
+ - pinctrl-names: should contain only one value - "default".
+
+Example:
+
+ sti_uni_player2: sti-uni-player@2 {
+ compatible = "st,sti-uni-player";
+ status = "okay";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+ reg = <0x8D82000 0x158>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 4 0 1>;
+ dai-name = "Uni Player #1 (DAC)";
+ dma-names = "tx";
+ uniperiph-id = <2>;
+ version = <5>;
+ mode = "PCM";
+ };
+
+ sti_uni_player3: sti-uni-player@3 {
+ compatible = "st,sti-uni-player";
+ status = "okay";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
+ reg = <0x8D85000 0x158>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 7 0 1>;
+ dma-names = "tx";
+ dai-name = "Uni Player #1 (PIO)";
+ uniperiph-id = <3>;
+ version = <5>;
+ mode = "SPDIF";
+ };
+
+ sti_uni_reader1: sti-uni-reader@1 {
+ compatible = "st,sti-uni-reader";
+ status = "disabled";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ reg = <0x8D84000 0x158>;
+ interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 6 0 1>;
+ dma-names = "rx";
+ dai-name = "Uni Reader #1 (HDMI RX)";
+ version = <3>;
+ };
+
+2) sti-sas-codec: internal audio codec IPs driver
+-------------------------------------------------
+
+Required properties:
+ - compatible: "st,sti<chip>-sas-codec" .
+ Should be chip "st,stih416-sas-codec" or "st,stih407-sas-codec"
+
+ - st,syscfg: phandle to boot-device system configuration registers.
+
+ - pinctrl-0: SPDIF PIO description.
+
+ - pinctrl-names: should contain only one value - "default".
+
+Example:
+ sti_sas_codec: sti-sas-codec {
+ compatible = "st,stih407-sas-codec";
+ #sound-dai-cells = <1>;
+ st,reg_audio = <&syscfg_core>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spdif_out >;
+ };
+
+Example of audio card declaration:
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "sti audio card";
+ status = "okay";
+
+ simple-audio-card,dai-link@0 {
+ /* DAC */
+ format = "i2s";
+ dai-tdm-slot-width = <32>;
+ cpu {
+ sound-dai = <&sti_uni_player2>;
+ };
+
+ codec {
+ sound-dai = <&sti_sasg_codec 1>;
+ };
+ };
+ simple-audio-card,dai-link@1 {
+ /* SPDIF */
+ format = "left_j";
+ cpu {
+ sound-dai = <&sti_uni_player3>;
+ };
+
+ codec {
+ sound-dai = <&sti_sasg_codec 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt
index f1ad9c367532..9c696fa66f81 100644
--- a/Documentation/devicetree/bindings/spi/spi-ath79.txt
+++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 
 Example:
 
- spi@1F000000 {
+ spi@1f000000 {
  compatible = "qca,ar9132-spi", "qca,ar7100-spi";
- reg = <0x1F000000 0x10>;
+ reg = <0x1f000000 0x10>;
 
  clocks = <&pll 2>;
  clock-names = "ahb";
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index d444757c4d9e..66a33ae5f5bc 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -110,6 +110,7 @@ ingenic Ingenic Semiconductor
 innolux Innolux Corporation
 intel Intel Corporation
 intercontrol Inter Control Group
+invensense InvenSense Inc.
 isee ISEE 2007 S.L.
 isil Intersil
 karo Ka-Ro electronics GmbH
@@ -150,6 +151,7 @@ nvidia NVIDIA
 nxp NXP Semiconductors
 onnn ON Semiconductor Corp.
 opencores OpenCores.org
+option Option NV
 ortustech Ortus Technology Co., Ltd.
 ovti OmniVision Technologies
 panasonic Panasonic Corporation
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
index 014f112e2a14..57fffe33ebfc 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
 temp[2-9]_input CPU temperatures (1/1000 degree,
  0.125 degree resolution)
 
-fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
  Setting SmartFan mode is supported only if it has been
  previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5: 0 z6 z5 z4 z3 z2 z1 z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index e63b446d973c..13f888a02a3d 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
  $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
  mode) if this option is supported by $(AR).
 
+ ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS Overrides the kbuild defaults
+
+ These variables are appended to the KBUILD_CPPFLAGS,
+ KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+ top-level Makefile has set any other flags. This provides a
+ means for an architecture to override the defaults.
+
+
 --- 6.2 Add prerequisites to archheaders:
 
  The archheaders: rule is used to generate header files that
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index f732a8321e8a..8cc17ca71813 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -410,8 +410,17 @@ Documentation/usb/persist.txt.
 
 Q: Can I suspend-to-disk using a swap partition under LVM?
 
-A: No. You can suspend successfully, but you'll not be able to
-resume. uswsusp should be able to work with LVM. See suspend.sf.net.
+A: Yes and No. You can suspend successfully, but the kernel will not be able
+to resume on its own. You need an initramfs that can recognize the resume
+situation, activate the logical volume containing the swap volume (but not
+touch any filesystems!), and eventually call
+
+echo -n "$major:$minor" > /sys/power/resume
+
+where $major and $minor are the respective major and minor device numbers of
+the swap volume.
+
+uswsusp works with LVM, too. See http://suspend.sourceforge.net/
 
 Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
 compiled with the similar configuration files. Anyway I found that
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 949de191fcdc..cda56df9b8a7 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  buf += "#include <linux/string.h>\n"
  buf += "#include <linux/configfs.h>\n"
  buf += "#include <linux/ctype.h>\n"
- buf += "#include <asm/unaligned.h>\n\n"
+ buf += "#include <asm/unaligned.h>\n"
+ buf += "#include <scsi/scsi_proto.h>\n\n"
  buf += "#include <target/target_core_base.h>\n"
  buf += "#include <target/target_core_fabric.h>\n"
  buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
  buf += " }\n"
  buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
  buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
- buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
- buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+ if proto_ident == "FC":
+ buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+ elif proto_ident == "SAS":
+ buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+ elif proto_ident == "iSCSI":
+ buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
  buf += " if (ret < 0) {\n"
  buf += " kfree(tpg);\n"
  buf += " return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
292 299
293 buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" 300 buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
294 buf += " .module = THIS_MODULE,\n" 301 buf += " .module = THIS_MODULE,\n"
295 buf += " .name = " + fabric_mod_name + ",\n" 302 buf += " .name = \"" + fabric_mod_name + "\",\n"
296 buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" 303 buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
297 buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" 304 buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
298 buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" 305 buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
322 buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" 329 buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
323 buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" 330 buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
324 buf += "\n" 331 buf += "\n"
325 buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" 332 buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
326 buf += "};\n\n" 333 buf += "};\n\n"
327 334
328 buf += "static int __init " + fabric_mod_name + "_init(void)\n" 335 buf += "static int __init " + fabric_mod_name + "_init(void)\n"
329 buf += "{\n" 336 buf += "{\n"
330 buf += " return target_register_template(" + fabric_mod_name + "_ops);\n" 337 buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
331 buf += "};\n\n" 338 buf += "};\n\n"
332 339
333 buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" 340 buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
334 buf += "{\n" 341 buf += "{\n"
335 buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n" 342 buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
336 buf += "};\n\n" 343 buf += "};\n\n"
337 344
338 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" 345 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index 8133cefb6b6e..569568f6644f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -361,11 +361,11 @@ S: Supported
361F: drivers/input/touchscreen/ad7879.c 361F: drivers/input/touchscreen/ad7879.c
362 362
363ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR) 363ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
364M: Jiri Kosina <jkosina@suse.cz> 364M: Jiri Kosina <jkosina@suse.com>
365S: Maintained 365S: Maintained
366 366
367ADM1025 HARDWARE MONITOR DRIVER 367ADM1025 HARDWARE MONITOR DRIVER
368M: Jean Delvare <jdelvare@suse.de> 368M: Jean Delvare <jdelvare@suse.com>
369L: lm-sensors@lm-sensors.org 369L: lm-sensors@lm-sensors.org
370S: Maintained 370S: Maintained
371F: Documentation/hwmon/adm1025 371F: Documentation/hwmon/adm1025
@@ -430,7 +430,7 @@ S: Maintained
430F: drivers/macintosh/therm_adt746x.c 430F: drivers/macintosh/therm_adt746x.c
431 431
432ADT7475 HARDWARE MONITOR DRIVER 432ADT7475 HARDWARE MONITOR DRIVER
433M: Jean Delvare <jdelvare@suse.de> 433M: Jean Delvare <jdelvare@suse.com>
434L: lm-sensors@lm-sensors.org 434L: lm-sensors@lm-sensors.org
435S: Maintained 435S: Maintained
436F: Documentation/hwmon/adt7475 436F: Documentation/hwmon/adt7475
@@ -445,7 +445,7 @@ F: drivers/input/misc/adxl34x.c
445 445
446ADVANSYS SCSI DRIVER 446ADVANSYS SCSI DRIVER
447M: Matthew Wilcox <matthew@wil.cx> 447M: Matthew Wilcox <matthew@wil.cx>
448M: Hannes Reinecke <hare@suse.de> 448M: Hannes Reinecke <hare@suse.com>
449L: linux-scsi@vger.kernel.org 449L: linux-scsi@vger.kernel.org
450S: Maintained 450S: Maintained
451F: Documentation/scsi/advansys.txt 451F: Documentation/scsi/advansys.txt
@@ -506,7 +506,7 @@ F: drivers/scsi/aha152x*
506F: drivers/scsi/pcmcia/aha152x* 506F: drivers/scsi/pcmcia/aha152x*
507 507
508AIC7XXX / AIC79XX SCSI DRIVER 508AIC7XXX / AIC79XX SCSI DRIVER
509M: Hannes Reinecke <hare@suse.de> 509M: Hannes Reinecke <hare@suse.com>
510L: linux-scsi@vger.kernel.org 510L: linux-scsi@vger.kernel.org
511S: Maintained 511S: Maintained
512F: drivers/scsi/aic7xxx/ 512F: drivers/scsi/aic7xxx/
@@ -746,7 +746,7 @@ S: Maintained
746F: sound/aoa/ 746F: sound/aoa/
747 747
748APM DRIVER 748APM DRIVER
749M: Jiri Kosina <jkosina@suse.cz> 749M: Jiri Kosina <jkosina@suse.com>
750S: Odd fixes 750S: Odd fixes
751F: arch/x86/kernel/apm_32.c 751F: arch/x86/kernel/apm_32.c
752F: include/linux/apm_bios.h 752F: include/linux/apm_bios.h
@@ -1001,6 +1001,7 @@ ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
1001M: Baruch Siach <baruch@tkos.co.il> 1001M: Baruch Siach <baruch@tkos.co.il>
1002L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1002L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1003S: Maintained 1003S: Maintained
1004F: arch/arm/boot/dts/cx92755*
1004N: digicolor 1005N: digicolor
1005 1006
1006ARM/EBSA110 MACHINE SUPPORT 1007ARM/EBSA110 MACHINE SUPPORT
@@ -1324,7 +1325,7 @@ F: arch/arm/mach-pxa/include/mach/palmtc.h
1324F: arch/arm/mach-pxa/palmtc.c 1325F: arch/arm/mach-pxa/palmtc.c
1325 1326
1326ARM/PALM TREO SUPPORT 1327ARM/PALM TREO SUPPORT
1327M: Tomas Cech <sleep_walker@suse.cz> 1328M: Tomas Cech <sleep_walker@suse.com>
1328L: linux-arm-kernel@lists.infradead.org 1329L: linux-arm-kernel@lists.infradead.org
1329W: http://hackndev.com 1330W: http://hackndev.com
1330S: Maintained 1331S: Maintained
@@ -1614,6 +1615,7 @@ M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
1614L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1615L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1615S: Maintained 1616S: Maintained
1616F: arch/arm/boot/dts/vexpress* 1617F: arch/arm/boot/dts/vexpress*
1618F: arch/arm64/boot/dts/arm/vexpress*
1617F: arch/arm/mach-vexpress/ 1619F: arch/arm/mach-vexpress/
1618F: */*/vexpress* 1620F: */*/vexpress*
1619F: */*/*/vexpress* 1621F: */*/*/vexpress*
@@ -2404,7 +2406,7 @@ F: drivers/gpio/gpio-bt8xx.c
2404BTRFS FILE SYSTEM 2406BTRFS FILE SYSTEM
2405M: Chris Mason <clm@fb.com> 2407M: Chris Mason <clm@fb.com>
2406M: Josef Bacik <jbacik@fb.com> 2408M: Josef Bacik <jbacik@fb.com>
2407M: David Sterba <dsterba@suse.cz> 2409M: David Sterba <dsterba@suse.com>
2408L: linux-btrfs@vger.kernel.org 2410L: linux-btrfs@vger.kernel.org
2409W: http://btrfs.wiki.kernel.org/ 2411W: http://btrfs.wiki.kernel.org/
2410Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 2412Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2562,19 +2564,31 @@ F: arch/powerpc/include/uapi/asm/spu*.h
2562F: arch/powerpc/oprofile/*cell* 2564F: arch/powerpc/oprofile/*cell*
2563F: arch/powerpc/platforms/cell/ 2565F: arch/powerpc/platforms/cell/
2564 2566
2565CEPH DISTRIBUTED FILE SYSTEM CLIENT 2567CEPH COMMON CODE (LIBCEPH)
2568M: Ilya Dryomov <idryomov@gmail.com>
2566M: "Yan, Zheng" <zyan@redhat.com> 2569M: "Yan, Zheng" <zyan@redhat.com>
2567M: Sage Weil <sage@redhat.com> 2570M: Sage Weil <sage@redhat.com>
2568L: ceph-devel@vger.kernel.org 2571L: ceph-devel@vger.kernel.org
2569W: http://ceph.com/ 2572W: http://ceph.com/
2570T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git 2573T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
2574T: git git://github.com/ceph/ceph-client.git
2571S: Supported 2575S: Supported
2572F: Documentation/filesystems/ceph.txt
2573F: fs/ceph/
2574F: net/ceph/ 2576F: net/ceph/
2575F: include/linux/ceph/ 2577F: include/linux/ceph/
2576F: include/linux/crush/ 2578F: include/linux/crush/
2577 2579
2580CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
2581M: "Yan, Zheng" <zyan@redhat.com>
2582M: Sage Weil <sage@redhat.com>
2583M: Ilya Dryomov <idryomov@gmail.com>
2584L: ceph-devel@vger.kernel.org
2585W: http://ceph.com/
2586T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
2587T: git git://github.com/ceph/ceph-client.git
2588S: Supported
2589F: Documentation/filesystems/ceph.txt
2590F: fs/ceph/
2591
2578CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: 2592CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
2579L: linux-usb@vger.kernel.org 2593L: linux-usb@vger.kernel.org
2580S: Orphan 2594S: Orphan
@@ -2735,7 +2749,7 @@ COCCINELLE/Semantic Patches (SmPL)
2735M: Julia Lawall <Julia.Lawall@lip6.fr> 2749M: Julia Lawall <Julia.Lawall@lip6.fr>
2736M: Gilles Muller <Gilles.Muller@lip6.fr> 2750M: Gilles Muller <Gilles.Muller@lip6.fr>
2737M: Nicolas Palix <nicolas.palix@imag.fr> 2751M: Nicolas Palix <nicolas.palix@imag.fr>
2738M: Michal Marek <mmarek@suse.cz> 2752M: Michal Marek <mmarek@suse.com>
2739L: cocci@systeme.lip6.fr (moderated for non-subscribers) 2753L: cocci@systeme.lip6.fr (moderated for non-subscribers)
2740T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc 2754T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
2741W: http://coccinelle.lip6.fr/ 2755W: http://coccinelle.lip6.fr/
@@ -2851,7 +2865,7 @@ F: kernel/cpuset.c
2851 2865
2852CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG) 2866CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
2853M: Johannes Weiner <hannes@cmpxchg.org> 2867M: Johannes Weiner <hannes@cmpxchg.org>
2854M: Michal Hocko <mhocko@suse.cz> 2868M: Michal Hocko <mhocko@kernel.org>
2855L: cgroups@vger.kernel.org 2869L: cgroups@vger.kernel.org
2856L: linux-mm@kvack.org 2870L: linux-mm@kvack.org
2857S: Maintained 2871S: Maintained
@@ -2932,7 +2946,7 @@ F: arch/x86/kernel/cpuid.c
2932F: arch/x86/kernel/msr.c 2946F: arch/x86/kernel/msr.c
2933 2947
2934CPU POWER MONITORING SUBSYSTEM 2948CPU POWER MONITORING SUBSYSTEM
2935M: Thomas Renninger <trenn@suse.de> 2949M: Thomas Renninger <trenn@suse.com>
2936L: linux-pm@vger.kernel.org 2950L: linux-pm@vger.kernel.org
2937S: Maintained 2951S: Maintained
2938F: tools/power/cpupower/ 2952F: tools/power/cpupower/
@@ -3162,7 +3176,7 @@ F: Documentation/networking/dmfe.txt
3162F: drivers/net/ethernet/dec/tulip/dmfe.c 3176F: drivers/net/ethernet/dec/tulip/dmfe.c
3163 3177
3164DC390/AM53C974 SCSI driver 3178DC390/AM53C974 SCSI driver
3165M: Hannes Reinecke <hare@suse.de> 3179M: Hannes Reinecke <hare@suse.com>
3166L: linux-scsi@vger.kernel.org 3180L: linux-scsi@vger.kernel.org
3167S: Maintained 3181S: Maintained
3168F: drivers/scsi/am53c974.c 3182F: drivers/scsi/am53c974.c
@@ -3366,7 +3380,7 @@ W: http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
3366S: Maintained 3380S: Maintained
3367 3381
3368DISKQUOTA 3382DISKQUOTA
3369M: Jan Kara <jack@suse.cz> 3383M: Jan Kara <jack@suse.com>
3370S: Maintained 3384S: Maintained
3371F: Documentation/filesystems/quota.txt 3385F: Documentation/filesystems/quota.txt
3372F: fs/quota/ 3386F: fs/quota/
@@ -3422,7 +3436,7 @@ F: Documentation/hwmon/dme1737
3422F: drivers/hwmon/dme1737.c 3436F: drivers/hwmon/dme1737.c
3423 3437
3424DMI/SMBIOS SUPPORT 3438DMI/SMBIOS SUPPORT
3425M: Jean Delvare <jdelvare@suse.de> 3439M: Jean Delvare <jdelvare@suse.com>
3426S: Maintained 3440S: Maintained
3427T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/ 3441T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
3428F: Documentation/ABI/testing/sysfs-firmware-dmi-tables 3442F: Documentation/ABI/testing/sysfs-firmware-dmi-tables
@@ -3573,6 +3587,15 @@ S: Maintained
3573F: drivers/gpu/drm/rockchip/ 3587F: drivers/gpu/drm/rockchip/
3574F: Documentation/devicetree/bindings/video/rockchip* 3588F: Documentation/devicetree/bindings/video/rockchip*
3575 3589
3590DRM DRIVERS FOR STI
3591M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
3592M: Vincent Abriou <vincent.abriou@st.com>
3593L: dri-devel@lists.freedesktop.org
3594T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
3595S: Maintained
3596F: drivers/gpu/drm/sti
3597F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
3598
3576DSBR100 USB FM RADIO DRIVER 3599DSBR100 USB FM RADIO DRIVER
3577M: Alexey Klimov <klimov.linux@gmail.com> 3600M: Alexey Klimov <klimov.linux@gmail.com>
3578L: linux-media@vger.kernel.org 3601L: linux-media@vger.kernel.org
@@ -4038,7 +4061,7 @@ F: drivers/of/of_mdio.c
4038F: drivers/of/of_net.c 4061F: drivers/of/of_net.c
4039 4062
4040EXT2 FILE SYSTEM 4063EXT2 FILE SYSTEM
4041M: Jan Kara <jack@suse.cz> 4064M: Jan Kara <jack@suse.com>
4042L: linux-ext4@vger.kernel.org 4065L: linux-ext4@vger.kernel.org
4043S: Maintained 4066S: Maintained
4044F: Documentation/filesystems/ext2.txt 4067F: Documentation/filesystems/ext2.txt
@@ -4046,7 +4069,7 @@ F: fs/ext2/
4046F: include/linux/ext2* 4069F: include/linux/ext2*
4047 4070
4048EXT3 FILE SYSTEM 4071EXT3 FILE SYSTEM
4049M: Jan Kara <jack@suse.cz> 4072M: Jan Kara <jack@suse.com>
4050M: Andrew Morton <akpm@linux-foundation.org> 4073M: Andrew Morton <akpm@linux-foundation.org>
4051M: Andreas Dilger <adilger.kernel@dilger.ca> 4074M: Andreas Dilger <adilger.kernel@dilger.ca>
4052L: linux-ext4@vger.kernel.org 4075L: linux-ext4@vger.kernel.org
@@ -4096,7 +4119,7 @@ F: drivers/video/fbdev/exynos/exynos_mipi*
4096F: include/video/exynos_mipi* 4119F: include/video/exynos_mipi*
4097 4120
4098F71805F HARDWARE MONITORING DRIVER 4121F71805F HARDWARE MONITORING DRIVER
4099M: Jean Delvare <jdelvare@suse.de> 4122M: Jean Delvare <jdelvare@suse.com>
4100L: lm-sensors@lm-sensors.org 4123L: lm-sensors@lm-sensors.org
4101S: Maintained 4124S: Maintained
4102F: Documentation/hwmon/f71805f 4125F: Documentation/hwmon/f71805f
@@ -4231,7 +4254,7 @@ S: Maintained
4231F: drivers/block/rsxx/ 4254F: drivers/block/rsxx/
4232 4255
4233FLOPPY DRIVER 4256FLOPPY DRIVER
4234M: Jiri Kosina <jkosina@suse.cz> 4257M: Jiri Kosina <jkosina@suse.com>
4235T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git 4258T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
4236S: Odd fixes 4259S: Odd fixes
4237F: drivers/block/floppy.c 4260F: drivers/block/floppy.c
@@ -4652,7 +4675,7 @@ F: drivers/media/usb/stk1160/
4652 4675
4653H8/300 ARCHITECTURE 4676H8/300 ARCHITECTURE
4654M: Yoshinori Sato <ysato@users.sourceforge.jp> 4677M: Yoshinori Sato <ysato@users.sourceforge.jp>
4655L: uclinux-h8-devel@lists.sourceforge.jp 4678L: uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers)
4656W: http://uclinux-h8.sourceforge.jp 4679W: http://uclinux-h8.sourceforge.jp
4657T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git 4680T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git
4658S: Maintained 4681S: Maintained
@@ -4699,7 +4722,7 @@ S: Maintained
4699F: drivers/media/usb/hackrf/ 4722F: drivers/media/usb/hackrf/
4700 4723
4701HARDWARE MONITORING 4724HARDWARE MONITORING
4702M: Jean Delvare <jdelvare@suse.de> 4725M: Jean Delvare <jdelvare@suse.com>
4703M: Guenter Roeck <linux@roeck-us.net> 4726M: Guenter Roeck <linux@roeck-us.net>
4704L: lm-sensors@lm-sensors.org 4727L: lm-sensors@lm-sensors.org
4705W: http://www.lm-sensors.org/ 4728W: http://www.lm-sensors.org/
@@ -4802,7 +4825,7 @@ F: include/linux/pm.h
4802F: arch/*/include/asm/suspend*.h 4825F: arch/*/include/asm/suspend*.h
4803 4826
4804HID CORE LAYER 4827HID CORE LAYER
4805M: Jiri Kosina <jkosina@suse.cz> 4828M: Jiri Kosina <jkosina@suse.com>
4806L: linux-input@vger.kernel.org 4829L: linux-input@vger.kernel.org
4807T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 4830T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
4808S: Maintained 4831S: Maintained
@@ -4811,7 +4834,7 @@ F: include/linux/hid*
4811F: include/uapi/linux/hid* 4834F: include/uapi/linux/hid*
4812 4835
4813HID SENSOR HUB DRIVERS 4836HID SENSOR HUB DRIVERS
4814M: Jiri Kosina <jkosina@suse.cz> 4837M: Jiri Kosina <jkosina@suse.com>
4815M: Jonathan Cameron <jic23@kernel.org> 4838M: Jonathan Cameron <jic23@kernel.org>
4816M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> 4839M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
4817L: linux-input@vger.kernel.org 4840L: linux-input@vger.kernel.org
@@ -4945,7 +4968,7 @@ F: include/linux/hyperv.h
4945F: tools/hv/ 4968F: tools/hv/
4946 4969
4947I2C OVER PARALLEL PORT 4970I2C OVER PARALLEL PORT
4948M: Jean Delvare <jdelvare@suse.de> 4971M: Jean Delvare <jdelvare@suse.com>
4949L: linux-i2c@vger.kernel.org 4972L: linux-i2c@vger.kernel.org
4950S: Maintained 4973S: Maintained
4951F: Documentation/i2c/busses/i2c-parport 4974F: Documentation/i2c/busses/i2c-parport
@@ -4954,7 +4977,7 @@ F: drivers/i2c/busses/i2c-parport.c
4954F: drivers/i2c/busses/i2c-parport-light.c 4977F: drivers/i2c/busses/i2c-parport-light.c
4955 4978
4956I2C/SMBUS CONTROLLER DRIVERS FOR PC 4979I2C/SMBUS CONTROLLER DRIVERS FOR PC
4957M: Jean Delvare <jdelvare@suse.de> 4980M: Jean Delvare <jdelvare@suse.com>
4958L: linux-i2c@vger.kernel.org 4981L: linux-i2c@vger.kernel.org
4959S: Maintained 4982S: Maintained
4960F: Documentation/i2c/busses/i2c-ali1535 4983F: Documentation/i2c/busses/i2c-ali1535
@@ -4995,7 +5018,7 @@ F: drivers/i2c/busses/i2c-ismt.c
4995F: Documentation/i2c/busses/i2c-ismt 5018F: Documentation/i2c/busses/i2c-ismt
4996 5019
4997I2C/SMBUS STUB DRIVER 5020I2C/SMBUS STUB DRIVER
4998M: Jean Delvare <jdelvare@suse.de> 5021M: Jean Delvare <jdelvare@suse.com>
4999L: linux-i2c@vger.kernel.org 5022L: linux-i2c@vger.kernel.org
5000S: Maintained 5023S: Maintained
5001F: drivers/i2c/i2c-stub.c 5024F: drivers/i2c/i2c-stub.c
@@ -5022,7 +5045,7 @@ L: linux-acpi@vger.kernel.org
5022S: Maintained 5045S: Maintained
5023 5046
5024I2C-TAOS-EVM DRIVER 5047I2C-TAOS-EVM DRIVER
5025M: Jean Delvare <jdelvare@suse.de> 5048M: Jean Delvare <jdelvare@suse.com>
5026L: linux-i2c@vger.kernel.org 5049L: linux-i2c@vger.kernel.org
5027S: Maintained 5050S: Maintained
5028F: Documentation/i2c/busses/i2c-taos-evm 5051F: Documentation/i2c/busses/i2c-taos-evm
@@ -5551,8 +5574,8 @@ F: include/uapi/linux/ip_vs.h
5551F: net/netfilter/ipvs/ 5574F: net/netfilter/ipvs/
5552 5575
5553IPWIRELESS DRIVER 5576IPWIRELESS DRIVER
5554M: Jiri Kosina <jkosina@suse.cz> 5577M: Jiri Kosina <jkosina@suse.com>
5555M: David Sterba <dsterba@suse.cz> 5578M: David Sterba <dsterba@suse.com>
5556S: Odd Fixes 5579S: Odd Fixes
5557F: drivers/tty/ipwireless/ 5580F: drivers/tty/ipwireless/
5558 5581
@@ -5586,6 +5609,7 @@ F: kernel/irq/
5586IRQCHIP DRIVERS 5609IRQCHIP DRIVERS
5587M: Thomas Gleixner <tglx@linutronix.de> 5610M: Thomas Gleixner <tglx@linutronix.de>
5588M: Jason Cooper <jason@lakedaemon.net> 5611M: Jason Cooper <jason@lakedaemon.net>
5612M: Marc Zyngier <marc.zyngier@arm.com>
5589L: linux-kernel@vger.kernel.org 5613L: linux-kernel@vger.kernel.org
5590S: Maintained 5614S: Maintained
5591T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 5615T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5594,11 +5618,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
5594F: drivers/irqchip/ 5618F: drivers/irqchip/
5595 5619
5596IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 5620IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
5597M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 5621M: Jiang Liu <jiang.liu@linux.intel.com>
5622M: Marc Zyngier <marc.zyngier@arm.com>
5598S: Maintained 5623S: Maintained
5624T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
5599F: Documentation/IRQ-domain.txt 5625F: Documentation/IRQ-domain.txt
5600F: include/linux/irqdomain.h 5626F: include/linux/irqdomain.h
5601F: kernel/irq/irqdomain.c 5627F: kernel/irq/irqdomain.c
5628F: kernel/irq/msi.c
5602 5629
5603ISAPNP 5630ISAPNP
5604M: Jaroslav Kysela <perex@perex.cz> 5631M: Jaroslav Kysela <perex@perex.cz>
@@ -5672,7 +5699,7 @@ S: Maintained
5672F: drivers/isdn/hardware/eicon/ 5699F: drivers/isdn/hardware/eicon/
5673 5700
5674IT87 HARDWARE MONITORING DRIVER 5701IT87 HARDWARE MONITORING DRIVER
5675M: Jean Delvare <jdelvare@suse.de> 5702M: Jean Delvare <jdelvare@suse.com>
5676L: lm-sensors@lm-sensors.org 5703L: lm-sensors@lm-sensors.org
5677S: Maintained 5704S: Maintained
5678F: Documentation/hwmon/it87 5705F: Documentation/hwmon/it87
@@ -5739,7 +5766,7 @@ F: include/uapi/linux/jffs2.h
5739 5766
5740JOURNALLING LAYER FOR BLOCK DEVICES (JBD) 5767JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
5741M: Andrew Morton <akpm@linux-foundation.org> 5768M: Andrew Morton <akpm@linux-foundation.org>
5742M: Jan Kara <jack@suse.cz> 5769M: Jan Kara <jack@suse.com>
5743L: linux-ext4@vger.kernel.org 5770L: linux-ext4@vger.kernel.org
5744S: Maintained 5771S: Maintained
5745F: fs/jbd/ 5772F: fs/jbd/
@@ -5803,7 +5830,7 @@ S: Maintained
5803F: fs/autofs4/ 5830F: fs/autofs4/
5804 5831
5805KERNEL BUILD + files below scripts/ (unless maintained elsewhere) 5832KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
5806M: Michal Marek <mmarek@suse.cz> 5833M: Michal Marek <mmarek@suse.com>
5807T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next 5834T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
5808T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes 5835T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
5809L: linux-kbuild@vger.kernel.org 5836L: linux-kbuild@vger.kernel.org
@@ -5867,7 +5894,7 @@ F: arch/x86/include/asm/svm.h
5867F: arch/x86/kvm/svm.c 5894F: arch/x86/kvm/svm.c
5868 5895
5869KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC 5896KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
5870M: Alexander Graf <agraf@suse.de> 5897M: Alexander Graf <agraf@suse.com>
5871L: kvm-ppc@vger.kernel.org 5898L: kvm-ppc@vger.kernel.org
5872W: http://kvm.qumranet.com 5899W: http://kvm.qumranet.com
5873T: git git://github.com/agraf/linux-2.6.git 5900T: git git://github.com/agraf/linux-2.6.git
@@ -5885,7 +5912,6 @@ S: Supported
5885F: Documentation/s390/kvm.txt 5912F: Documentation/s390/kvm.txt
5886F: arch/s390/include/asm/kvm* 5913F: arch/s390/include/asm/kvm*
5887F: arch/s390/kvm/ 5914F: arch/s390/kvm/
5888F: drivers/s390/kvm/
5889 5915
5890KERNEL VIRTUAL MACHINE (KVM) FOR ARM 5916KERNEL VIRTUAL MACHINE (KVM) FOR ARM
5891M: Christoffer Dall <christoffer.dall@linaro.org> 5917M: Christoffer Dall <christoffer.dall@linaro.org>
@@ -6024,7 +6050,7 @@ F: drivers/leds/
6024F: include/linux/leds.h 6050F: include/linux/leds.h
6025 6051
6026LEGACY EEPROM DRIVER 6052LEGACY EEPROM DRIVER
6027M: Jean Delvare <jdelvare@suse.de> 6053M: Jean Delvare <jdelvare@suse.com>
6028S: Maintained 6054S: Maintained
6029F: Documentation/misc-devices/eeprom 6055F: Documentation/misc-devices/eeprom
6030F: drivers/misc/eeprom/eeprom.c 6056F: drivers/misc/eeprom/eeprom.c
@@ -6077,7 +6103,7 @@ F: include/linux/ata.h
6077F: include/linux/libata.h 6103F: include/linux/libata.h
6078 6104
6079LIBATA PATA ARASAN COMPACT FLASH CONTROLLER 6105LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
6080M: Viresh Kumar <viresh.linux@gmail.com> 6106M: Viresh Kumar <vireshk@kernel.org>
6081L: linux-ide@vger.kernel.org 6107L: linux-ide@vger.kernel.org
6082T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git 6108T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
6083S: Maintained 6109S: Maintained
@@ -6147,6 +6173,7 @@ L: linux-nvdimm@lists.01.org
6147Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ 6173Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
6148S: Supported 6174S: Supported
6149F: drivers/nvdimm/pmem.c 6175F: drivers/nvdimm/pmem.c
6176F: include/linux/pmem.h
6150 6177
6151LINUX FOR IBM pSERIES (RS/6000) 6178LINUX FOR IBM pSERIES (RS/6000)
6152M: Paul Mackerras <paulus@au.ibm.com> 6179M: Paul Mackerras <paulus@au.ibm.com>
@@ -6161,7 +6188,7 @@ M: Michael Ellerman <mpe@ellerman.id.au>
6161W: http://www.penguinppc.org/ 6188W: http://www.penguinppc.org/
6162L: linuxppc-dev@lists.ozlabs.org 6189L: linuxppc-dev@lists.ozlabs.org
6163Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ 6190Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
6164T: git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git 6191T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
6165S: Supported 6192S: Supported
6166F: Documentation/powerpc/ 6193F: Documentation/powerpc/
6167F: arch/powerpc/ 6194F: arch/powerpc/
@@ -6237,8 +6264,8 @@ F: drivers/platform/x86/hp_accel.c
6237LIVE PATCHING 6264LIVE PATCHING
6238M: Josh Poimboeuf <jpoimboe@redhat.com> 6265M: Josh Poimboeuf <jpoimboe@redhat.com>
6239M: Seth Jennings <sjenning@redhat.com> 6266M: Seth Jennings <sjenning@redhat.com>
6240M: Jiri Kosina <jkosina@suse.cz> 6267M: Jiri Kosina <jkosina@suse.com>
6241M: Vojtech Pavlik <vojtech@suse.cz> 6268M: Vojtech Pavlik <vojtech@suse.com>
6242S: Maintained 6269S: Maintained
6243F: kernel/livepatch/ 6270F: kernel/livepatch/
6244F: include/linux/livepatch.h 6271F: include/linux/livepatch.h
@@ -6264,21 +6291,21 @@ S: Maintained
6264F: drivers/hwmon/lm73.c 6291F: drivers/hwmon/lm73.c
6265 6292
6266LM78 HARDWARE MONITOR DRIVER 6293LM78 HARDWARE MONITOR DRIVER
6267M: Jean Delvare <jdelvare@suse.de> 6294M: Jean Delvare <jdelvare@suse.com>
6268L: lm-sensors@lm-sensors.org 6295L: lm-sensors@lm-sensors.org
6269S: Maintained 6296S: Maintained
6270F: Documentation/hwmon/lm78 6297F: Documentation/hwmon/lm78
6271F: drivers/hwmon/lm78.c 6298F: drivers/hwmon/lm78.c
6272 6299
6273LM83 HARDWARE MONITOR DRIVER 6300LM83 HARDWARE MONITOR DRIVER
6274M: Jean Delvare <jdelvare@suse.de> 6301M: Jean Delvare <jdelvare@suse.com>
6275L: lm-sensors@lm-sensors.org 6302L: lm-sensors@lm-sensors.org
6276S: Maintained 6303S: Maintained
6277F: Documentation/hwmon/lm83 6304F: Documentation/hwmon/lm83
6278F: drivers/hwmon/lm83.c 6305F: drivers/hwmon/lm83.c
6279 6306
6280LM90 HARDWARE MONITOR DRIVER 6307LM90 HARDWARE MONITOR DRIVER
6281M: Jean Delvare <jdelvare@suse.de> 6308M: Jean Delvare <jdelvare@suse.com>
6282L: lm-sensors@lm-sensors.org 6309L: lm-sensors@lm-sensors.org
6283S: Maintained 6310S: Maintained
6284F: Documentation/hwmon/lm90 6311F: Documentation/hwmon/lm90
@@ -6824,6 +6851,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
6824S: Maintained 6851S: Maintained
6825F: drivers/media/usb/msi2500/ 6852F: drivers/media/usb/msi2500/
6826 6853
6854MSYSTEMS DISKONCHIP G3 MTD DRIVER
6855M: Robert Jarzmik <robert.jarzmik@free.fr>
6856L: linux-mtd@lists.infradead.org
6857S: Maintained
6858F: drivers/mtd/devices/docg3*
6859
6827MT9M032 APTINA SENSOR DRIVER 6860MT9M032 APTINA SENSOR DRIVER
6828M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 6861M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
6829L: linux-media@vger.kernel.org 6862L: linux-media@vger.kernel.org
@@ -7005,6 +7038,7 @@ F: include/uapi/linux/netfilter/
7005F: net/*/netfilter.c 7038F: net/*/netfilter.c
7006F: net/*/netfilter/ 7039F: net/*/netfilter/
7007F: net/netfilter/ 7040F: net/netfilter/
7041F: net/bridge/br_netfilter*.c
7008 7042
7009NETLABEL 7043NETLABEL
7010M: Paul Moore <paul@paul-moore.com> 7044M: Paul Moore <paul@paul-moore.com>
@@ -7704,7 +7738,7 @@ S: Maintained
7704F: drivers/char/pc8736x_gpio.c 7738F: drivers/char/pc8736x_gpio.c
7705 7739
7706PC87427 HARDWARE MONITORING DRIVER 7740PC87427 HARDWARE MONITORING DRIVER
7707M: Jean Delvare <jdelvare@suse.de> 7741M: Jean Delvare <jdelvare@suse.com>
7708L: lm-sensors@lm-sensors.org 7742L: lm-sensors@lm-sensors.org
7709S: Maintained 7743S: Maintained
7710F: Documentation/hwmon/pc87427 7744F: Documentation/hwmon/pc87427
@@ -7981,7 +8015,7 @@ S: Maintained
7981F: drivers/pinctrl/samsung/ 8015F: drivers/pinctrl/samsung/
7982 8016
7983PIN CONTROLLER - ST SPEAR 8017PIN CONTROLLER - ST SPEAR
7984M: Viresh Kumar <viresh.linux@gmail.com> 8018M: Viresh Kumar <vireshk@kernel.org>
7985L: spear-devel@list.st.com 8019L: spear-devel@list.st.com
7986L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 8020L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
7987W: http://www.st.com/spear 8021W: http://www.st.com/spear
@@ -7989,7 +8023,7 @@ S: Maintained
7989F: drivers/pinctrl/spear/ 8023F: drivers/pinctrl/spear/
7990 8024
7991PKTCDVD DRIVER 8025PKTCDVD DRIVER
7992M: Jiri Kosina <jkosina@suse.cz> 8026M: Jiri Kosina <jkosina@suse.com>
7993S: Maintained 8027S: Maintained
7994F: drivers/block/pktcdvd.c 8028F: drivers/block/pktcdvd.c
7995F: include/linux/pktcdvd.h 8029F: include/linux/pktcdvd.h
@@ -8366,10 +8400,12 @@ RADOS BLOCK DEVICE (RBD)
8366M: Ilya Dryomov <idryomov@gmail.com> 8400M: Ilya Dryomov <idryomov@gmail.com>
8367M: Sage Weil <sage@redhat.com> 8401M: Sage Weil <sage@redhat.com>
8368M: Alex Elder <elder@kernel.org> 8402M: Alex Elder <elder@kernel.org>
8369M: ceph-devel@vger.kernel.org 8403L: ceph-devel@vger.kernel.org
8370W: http://ceph.com/ 8404W: http://ceph.com/
8371T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git 8405T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
8406T: git git://github.com/ceph/ceph-client.git
8372S: Supported 8407S: Supported
8408F: Documentation/ABI/testing/sysfs-bus-rbd
8373F: drivers/block/rbd.c 8409F: drivers/block/rbd.c
8374F: drivers/block/rbd_types.h 8410F: drivers/block/rbd_types.h
8375 8411
@@ -8878,7 +8914,7 @@ S: Maintained
8878F: drivers/tty/serial/ 8914F: drivers/tty/serial/
8879 8915
8880SYNOPSYS DESIGNWARE DMAC DRIVER 8916SYNOPSYS DESIGNWARE DMAC DRIVER
8881M: Viresh Kumar <viresh.linux@gmail.com> 8917M: Viresh Kumar <vireshk@kernel.org>
8882M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 8918M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
8883S: Maintained 8919S: Maintained
8884F: include/linux/dma/dw.h 8920F: include/linux/dma/dw.h
@@ -9045,7 +9081,7 @@ S: Maintained
9045F: drivers/mmc/host/sdhci-s3c* 9081F: drivers/mmc/host/sdhci-s3c*
9046 9082
9047SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER 9083SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
9048M: Viresh Kumar <viresh.linux@gmail.com> 9084M: Viresh Kumar <vireshk@kernel.org>
9049L: spear-devel@list.st.com 9085L: spear-devel@list.st.com
9050L: linux-mmc@vger.kernel.org 9086L: linux-mmc@vger.kernel.org
9051S: Maintained 9087S: Maintained
@@ -9407,7 +9443,7 @@ F: Documentation/hwmon/sch5627
9407F: drivers/hwmon/sch5627.c 9443F: drivers/hwmon/sch5627.c
9408 9444
9409SMSC47B397 HARDWARE MONITOR DRIVER 9445SMSC47B397 HARDWARE MONITOR DRIVER
9410M: Jean Delvare <jdelvare@suse.de> 9446M: Jean Delvare <jdelvare@suse.com>
9411L: lm-sensors@lm-sensors.org 9447L: lm-sensors@lm-sensors.org
9412S: Maintained 9448S: Maintained
9413F: Documentation/hwmon/smsc47b397 9449F: Documentation/hwmon/smsc47b397
@@ -9456,7 +9492,7 @@ S: Supported
9456F: drivers/media/pci/solo6x10/ 9492F: drivers/media/pci/solo6x10/
9457 9493
9458SOFTWARE RAID (Multiple Disks) SUPPORT 9494SOFTWARE RAID (Multiple Disks) SUPPORT
9459M: Neil Brown <neilb@suse.de> 9495M: Neil Brown <neilb@suse.com>
9460L: linux-raid@vger.kernel.org 9496L: linux-raid@vger.kernel.org
9461S: Supported 9497S: Supported
9462F: drivers/md/ 9498F: drivers/md/
@@ -9499,7 +9535,7 @@ F: drivers/memstick/core/ms_block.*
9499 9535
9500SOUND 9536SOUND
9501M: Jaroslav Kysela <perex@perex.cz> 9537M: Jaroslav Kysela <perex@perex.cz>
9502M: Takashi Iwai <tiwai@suse.de> 9538M: Takashi Iwai <tiwai@suse.com>
9503L: alsa-devel@alsa-project.org (moderated for non-subscribers) 9539L: alsa-devel@alsa-project.org (moderated for non-subscribers)
9504W: http://www.alsa-project.org/ 9540W: http://www.alsa-project.org/
9505T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git 9541T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
@@ -9583,7 +9619,7 @@ S: Maintained
9583F: include/linux/compiler.h 9619F: include/linux/compiler.h
9584 9620
9585SPEAR PLATFORM SUPPORT 9621SPEAR PLATFORM SUPPORT
9586M: Viresh Kumar <viresh.linux@gmail.com> 9622M: Viresh Kumar <vireshk@kernel.org>
9587M: Shiraz Hashim <shiraz.linux.kernel@gmail.com> 9623M: Shiraz Hashim <shiraz.linux.kernel@gmail.com>
9588L: spear-devel@list.st.com 9624L: spear-devel@list.st.com
9589L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9625L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9592,7 +9628,7 @@ S: Maintained
9592F: arch/arm/mach-spear/ 9628F: arch/arm/mach-spear/
9593 9629
9594SPEAR CLOCK FRAMEWORK SUPPORT 9630SPEAR CLOCK FRAMEWORK SUPPORT
9595M: Viresh Kumar <viresh.linux@gmail.com> 9631M: Viresh Kumar <vireshk@kernel.org>
9596L: spear-devel@list.st.com 9632L: spear-devel@list.st.com
9597L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 9633L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
9598W: http://www.st.com/spear 9634W: http://www.st.com/spear
@@ -10382,7 +10418,7 @@ K: ^Subject:.*(?i)trivial
10382 10418
10383TTY LAYER 10419TTY LAYER
10384M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 10420M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
10385M: Jiri Slaby <jslaby@suse.cz> 10421M: Jiri Slaby <jslaby@suse.com>
10386S: Supported 10422S: Supported
10387T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git 10423T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
10388F: Documentation/serial/ 10424F: Documentation/serial/
@@ -10456,7 +10492,7 @@ F: arch/m68k/*/*_no.*
10456F: arch/m68k/include/asm/*_no.* 10492F: arch/m68k/include/asm/*_no.*
10457 10493
10458UDF FILESYSTEM 10494UDF FILESYSTEM
10459M: Jan Kara <jack@suse.cz> 10495M: Jan Kara <jack@suse.com>
10460S: Maintained 10496S: Maintained
10461F: Documentation/filesystems/udf.txt 10497F: Documentation/filesystems/udf.txt
10462F: fs/udf/ 10498F: fs/udf/
@@ -10599,7 +10635,7 @@ F: drivers/usb/gadget/
10599F: include/linux/usb/gadget* 10635F: include/linux/usb/gadget*
10600 10636
10601USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) 10637USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
10602M: Jiri Kosina <jkosina@suse.cz> 10638M: Jiri Kosina <jkosina@suse.com>
10603L: linux-usb@vger.kernel.org 10639L: linux-usb@vger.kernel.org
10604T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 10640T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
10605S: Maintained 10641S: Maintained
@@ -10724,7 +10760,7 @@ S: Maintained
10724F: drivers/usb/host/uhci* 10760F: drivers/usb/host/uhci*
10725 10761
10726USB "USBNET" DRIVER FRAMEWORK 10762USB "USBNET" DRIVER FRAMEWORK
10727M: Oliver Neukum <oneukum@suse.de> 10763M: Oliver Neukum <oneukum@suse.com>
10728L: netdev@vger.kernel.org 10764L: netdev@vger.kernel.org
10729W: http://www.linux-usb.org/usbnet 10765W: http://www.linux-usb.org/usbnet
10730S: Maintained 10766S: Maintained
@@ -10878,6 +10914,15 @@ F: drivers/block/virtio_blk.c
10878F: include/linux/virtio_*.h 10914F: include/linux/virtio_*.h
10879F: include/uapi/linux/virtio_*.h 10915F: include/uapi/linux/virtio_*.h
10880 10916
10917VIRTIO DRIVERS FOR S390
10918M: Christian Borntraeger <borntraeger@de.ibm.com>
10919M: Cornelia Huck <cornelia.huck@de.ibm.com>
10920L: linux-s390@vger.kernel.org
10921L: virtualization@lists.linux-foundation.org
10922L: kvm@vger.kernel.org
10923S: Supported
10924F: drivers/s390/virtio/
10925
10881VIRTIO GPU DRIVER 10926VIRTIO GPU DRIVER
10882M: David Airlie <airlied@linux.ie> 10927M: David Airlie <airlied@linux.ie>
10883M: Gerd Hoffmann <kraxel@redhat.com> 10928M: Gerd Hoffmann <kraxel@redhat.com>
@@ -11051,7 +11096,7 @@ F: Documentation/hwmon/w83793
11051F: drivers/hwmon/w83793.c 11096F: drivers/hwmon/w83793.c
11052 11097
11053W83795 HARDWARE MONITORING DRIVER 11098W83795 HARDWARE MONITORING DRIVER
11054M: Jean Delvare <jdelvare@suse.de> 11099M: Jean Delvare <jdelvare@suse.com>
11055L: lm-sensors@lm-sensors.org 11100L: lm-sensors@lm-sensors.org
11056S: Maintained 11101S: Maintained
11057F: drivers/hwmon/w83795.c 11102F: drivers/hwmon/w83795.c
diff --git a/Makefile b/Makefile
index 13270c0a9336..246053f04fb5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 2 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc8
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
597# Defaults to vmlinux, but the arch makefile usually adds further targets 597# Defaults to vmlinux, but the arch makefile usually adds further targets
598all: vmlinux 598all: vmlinux
599 599
600# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
601# values of the respective KBUILD_* variables
602ARCH_CPPFLAGS :=
603ARCH_AFLAGS :=
604ARCH_CFLAGS :=
600include arch/$(SRCARCH)/Makefile 605include arch/$(SRCARCH)/Makefile
601 606
602KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) 607KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -780,10 +785,11 @@ endif
780include scripts/Makefile.kasan 785include scripts/Makefile.kasan
781include scripts/Makefile.extrawarn 786include scripts/Makefile.extrawarn
782 787
783# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments 788# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
784KBUILD_CPPFLAGS += $(KCPPFLAGS) 789# last assignments
785KBUILD_AFLAGS += $(KAFLAGS) 790KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
786KBUILD_CFLAGS += $(KCFLAGS) 791KBUILD_AFLAGS += $(ARCH_AFLAGS) $(KAFLAGS)
792KBUILD_CFLAGS += $(ARCH_CFLAGS) $(KCFLAGS)
787 793
788# Use --build-id when available. 794# Use --build-id when available.
789LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\ 795LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
@@ -847,10 +853,10 @@ export mod_strip_cmd
847mod_compress_cmd = true 853mod_compress_cmd = true
848ifdef CONFIG_MODULE_COMPRESS 854ifdef CONFIG_MODULE_COMPRESS
849 ifdef CONFIG_MODULE_COMPRESS_GZIP 855 ifdef CONFIG_MODULE_COMPRESS_GZIP
850 mod_compress_cmd = gzip -n 856 mod_compress_cmd = gzip -n -f
851 endif # CONFIG_MODULE_COMPRESS_GZIP 857 endif # CONFIG_MODULE_COMPRESS_GZIP
852 ifdef CONFIG_MODULE_COMPRESS_XZ 858 ifdef CONFIG_MODULE_COMPRESS_XZ
853 mod_compress_cmd = xz 859 mod_compress_cmd = xz -f
854 endif # CONFIG_MODULE_COMPRESS_XZ 860 endif # CONFIG_MODULE_COMPRESS_XZ
855endif # CONFIG_MODULE_COMPRESS 861endif # CONFIG_MODULE_COMPRESS
856export mod_compress_cmd 862export mod_compress_cmd
diff --git a/arch/Kconfig b/arch/Kconfig
index bec6666a3cc4..8a8ea7110de8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -221,6 +221,10 @@ config ARCH_TASK_STRUCT_ALLOCATOR
221config ARCH_THREAD_INFO_ALLOCATOR 221config ARCH_THREAD_INFO_ALLOCATOR
222 bool 222 bool
223 223
224# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
225config ARCH_WANTS_DYNAMIC_TASK_STRUCT
226 bool
227
224config HAVE_REGS_AND_STACK_ACCESS_API 228config HAVE_REGS_AND_STACK_ACCESS_API
225 bool 229 bool
226 help 230 help
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index cde23cd03609..ffd9cf5ec8c4 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cputime.h
5generic-y += exec.h 5generic-y += exec.h
6generic-y += irq_work.h 6generic-y += irq_work.h
7generic-y += mcs_spinlock.h 7generic-y += mcs_spinlock.h
8generic-y += mm-arch-hooks.h
8generic-y += preempt.h 9generic-y += preempt.h
9generic-y += sections.h 10generic-y += sections.h
10generic-y += trace_clock.h 11generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/mm-arch-hooks.h b/arch/alpha/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b07fd862fec3..000000000000
--- a/arch/alpha/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ALPHA_MM_ARCH_HOOKS_H
13#define _ASM_ALPHA_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ALPHA_MM_ARCH_HOOKS_H */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e7cee0a5c56d..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -115,6 +115,7 @@ if ISA_ARCOMPACT
115 115
116config ARC_CPU_750D 116config ARC_CPU_750D
117 bool "ARC750D" 117 bool "ARC750D"
118 select ARC_CANT_LLSC
118 help 119 help
119 Support for ARC750 core 120 Support for ARC750 core
120 121
@@ -312,11 +313,11 @@ config ARC_PAGE_SIZE_8K
312 313
313config ARC_PAGE_SIZE_16K 314config ARC_PAGE_SIZE_16K
314 bool "16KB" 315 bool "16KB"
315 depends on ARC_MMU_V3 316 depends on ARC_MMU_V3 || ARC_MMU_V4
316 317
317config ARC_PAGE_SIZE_4K 318config ARC_PAGE_SIZE_4K
318 bool "4KB" 319 bool "4KB"
319 depends on ARC_MMU_V3 320 depends on ARC_MMU_V3 || ARC_MMU_V4
320 321
321endchoice 322endchoice
322 323
@@ -362,7 +363,12 @@ config ARC_CANT_LLSC
362config ARC_HAS_LLSC 363config ARC_HAS_LLSC
363 bool "Insn: LLOCK/SCOND (efficient atomic ops)" 364 bool "Insn: LLOCK/SCOND (efficient atomic ops)"
364 default y 365 default y
365 depends on !ARC_CPU_750D && !ARC_CANT_LLSC 366 depends on !ARC_CANT_LLSC
367
368config ARC_STAR_9000923308
369 bool "Workaround for llock/scond livelock"
370 default y
371 depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
366 372
367config ARC_HAS_SWAPE 373config ARC_HAS_SWAPE
368 bool "Insn: SWAPE (endian-swap)" 374 bool "Insn: SWAPE (endian-swap)"
@@ -378,6 +384,10 @@ config ARC_HAS_LL64
378 dest operands with 2 possible source operands. 384 dest operands with 2 possible source operands.
379 default y 385 default y
380 386
387config ARC_HAS_DIV_REM
388 bool "Insn: div, divu, rem, remu"
389 default y
390
381config ARC_HAS_RTC 391config ARC_HAS_RTC
382 bool "Local 64-bit r/o cycle counter" 392 bool "Local 64-bit r/o cycle counter"
383 default n 393 default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 6107062c0111..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
36cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 36cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
37cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 37cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
38 38
39ifdef CONFIG_ISA_ARCV2
40
39ifndef CONFIG_ARC_HAS_LL64 41ifndef CONFIG_ARC_HAS_LL64
40cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 42cflags-y += -mno-ll64
43endif
44
45ifndef CONFIG_ARC_HAS_DIV_REM
46cflags-y += -mno-div-rem
47endif
48
41endif 49endif
42 50
43cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables 51cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
@@ -49,7 +57,8 @@ endif
49 57
50ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 58ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
51# Generic build system uses -O2, we want -O3 59# Generic build system uses -O2, we want -O3
52cflags-y += -O3 60# Note: No need to add to cflags-y as that happens anyways
61ARCH_CFLAGS += -O3
53endif 62endif
54 63
55# small data is default for elf32 tool-chain. If not usable, disable it 64# small data is default for elf32 tool-chain. If not usable, disable it
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 15c8d6226c9d..1cd5e82f5dc2 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -12,7 +12,7 @@
12 12
13/ { 13/ {
14 compatible = "snps,arc"; 14 compatible = "snps,arc";
15 clock-frequency = <75000000>; 15 clock-frequency = <90000000>;
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 18
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index 199d42820eca..2f0b33257db2 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -12,7 +12,7 @@
12 12
13/ { 13/ {
14 compatible = "snps,arc"; 14 compatible = "snps,arc";
15 clock-frequency = <75000000>; 15 clock-frequency = <90000000>;
16 #address-cells = <1>; 16 #address-cells = <1>;
17 #size-cells = <1>; 17 #size-cells = <1>;
18 18
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 1a80cc91a03b..7611b10a2d23 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
22generic-y += local.h 22generic-y += local.h
23generic-y += local64.h 23generic-y += local64.h
24generic-y += mcs_spinlock.h 24generic-y += mcs_spinlock.h
25generic-y += mm-arch-hooks.h
25generic-y += mman.h 26generic-y += mman.h
26generic-y += msgbuf.h 27generic-y += msgbuf.h
27generic-y += param.h 28generic-y += param.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
89#define ECR_C_BIT_DTLB_LD_MISS 8 89#define ECR_C_BIT_DTLB_LD_MISS 8
90#define ECR_C_BIT_DTLB_ST_MISS 9 90#define ECR_C_BIT_DTLB_ST_MISS 9
91 91
92
93/* Auxiliary registers */ 92/* Auxiliary registers */
94#define AUX_IDENTITY 4 93#define AUX_IDENTITY 4
95#define AUX_INTR_VEC_BASE 0x25 94#define AUX_INTR_VEC_BASE 0x25
96 95#define AUX_NON_VOL 0x5e
97 96
98/* 97/*
99 * Floating Pt Registers 98 * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
240 239
241struct bcr_perip { 240struct bcr_perip {
242#ifdef CONFIG_CPU_BIG_ENDIAN 241#ifdef CONFIG_CPU_BIG_ENDIAN
243 unsigned int start:8, pad2:8, sz:8, pad:8; 242 unsigned int start:8, pad2:8, sz:8, ver:8;
244#else 243#else
245 unsigned int pad:8, sz:8, pad2:8, start:8; 244 unsigned int ver:8, sz:8, pad2:8, start:8;
246#endif 245#endif
247}; 246};
248 247
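The bcr_perip change above turns the low padding byte into a version field; build configuration registers like this are read as one 32-bit word and decoded through the endian-dependent bitfield. A host-side sketch of that decoding pattern (GCC little-endian bit allocation and a made-up register value assumed; illustrative only):

    #include <stdio.h>

    struct bcr_perip_le {                     /* little-endian layout from above */
            unsigned int ver:8, sz:8, pad2:8, start:8;
    };

    int main(void)
    {
            union {
                    unsigned int word;
                    struct bcr_perip_le f;
            } bcr = { .word = 0xd0001002 };   /* made-up BCR contents */

            printf("ver=%u sz=%u start=0x%x\n", bcr.f.ver, bcr.f.sz, bcr.f.start);
            /* prints: ver=2 sz=16 start=0xd0 */
            return 0;
    }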
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
23 23
24#define atomic_set(v, i) (((v)->counter) = (i)) 24#define atomic_set(v, i) (((v)->counter) = (i))
25 25
26#ifdef CONFIG_ISA_ARCV2 26#ifdef CONFIG_ARC_STAR_9000923308
27#define PREFETCHW " prefetchw [%1] \n" 27
28#else 28#define SCOND_FAIL_RETRY_VAR_DEF \
29#define PREFETCHW 29 unsigned int delay = 1, tmp; \
30
31#define SCOND_FAIL_RETRY_ASM \
32 " bz 4f \n" \
33 " ; --- scond fail delay --- \n" \
34 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
35 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
36 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
37 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
38 " b 1b \n" /* start over */ \
39 "4: ; --- success --- \n" \
40
41#define SCOND_FAIL_RETRY_VARS \
42 ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
43
44#else /* !CONFIG_ARC_STAR_9000923308 */
45
46#define SCOND_FAIL_RETRY_VAR_DEF
47
48#define SCOND_FAIL_RETRY_ASM \
49 " bnz 1b \n" \
50
51#define SCOND_FAIL_RETRY_VARS
52
30#endif 53#endif
31 54
32#define ATOMIC_OP(op, c_op, asm_op) \ 55#define ATOMIC_OP(op, c_op, asm_op) \
33static inline void atomic_##op(int i, atomic_t *v) \ 56static inline void atomic_##op(int i, atomic_t *v) \
34{ \ 57{ \
35 unsigned int temp; \ 58 unsigned int val; \
59 SCOND_FAIL_RETRY_VAR_DEF \
36 \ 60 \
37 __asm__ __volatile__( \ 61 __asm__ __volatile__( \
38 "1: \n" \ 62 "1: llock %[val], [%[ctr]] \n" \
39 PREFETCHW \ 63 " " #asm_op " %[val], %[val], %[i] \n" \
40 " llock %0, [%1] \n" \ 64 " scond %[val], [%[ctr]] \n" \
41 " " #asm_op " %0, %0, %2 \n" \ 65 " \n" \
42 " scond %0, [%1] \n" \ 66 SCOND_FAIL_RETRY_ASM \
43 " bnz 1b \n" \ 67 \
44 : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ 68 : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
45 : "r"(&v->counter), "ir"(i) \ 69 SCOND_FAIL_RETRY_VARS \
70 : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
71 [i] "ir" (i) \
46 : "cc"); \ 72 : "cc"); \
47} \ 73} \
48 74
49#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 75#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
50static inline int atomic_##op##_return(int i, atomic_t *v) \ 76static inline int atomic_##op##_return(int i, atomic_t *v) \
51{ \ 77{ \
52 unsigned int temp; \ 78 unsigned int val; \
79 SCOND_FAIL_RETRY_VAR_DEF \
53 \ 80 \
54 /* \ 81 /* \
55 * Explicit full memory barrier needed before/after as \ 82 * Explicit full memory barrier needed before/after as \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
58 smp_mb(); \ 85 smp_mb(); \
59 \ 86 \
60 __asm__ __volatile__( \ 87 __asm__ __volatile__( \
61 "1: \n" \ 88 "1: llock %[val], [%[ctr]] \n" \
62 PREFETCHW \ 89 " " #asm_op " %[val], %[val], %[i] \n" \
63 " llock %0, [%1] \n" \ 90 " scond %[val], [%[ctr]] \n" \
64 " " #asm_op " %0, %0, %2 \n" \ 91 " \n" \
65 " scond %0, [%1] \n" \ 92 SCOND_FAIL_RETRY_ASM \
66 " bnz 1b \n" \ 93 \
67 : "=&r"(temp) \ 94 : [val] "=&r" (val) \
68 : "r"(&v->counter), "ir"(i) \ 95 SCOND_FAIL_RETRY_VARS \
96 : [ctr] "r" (&v->counter), \
97 [i] "ir" (i) \
69 : "cc"); \ 98 : "cc"); \
70 \ 99 \
71 smp_mb(); \ 100 smp_mb(); \
72 \ 101 \
73 return temp; \ 102 return val; \
74} 103}
75 104
76#else /* !CONFIG_ARC_HAS_LLSC */ 105#else /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
150#undef ATOMIC_OPS 179#undef ATOMIC_OPS
151#undef ATOMIC_OP_RETURN 180#undef ATOMIC_OP_RETURN
152#undef ATOMIC_OP 181#undef ATOMIC_OP
182#undef SCOND_FAIL_RETRY_VAR_DEF
183#undef SCOND_FAIL_RETRY_ASM
184#undef SCOND_FAIL_RETRY_VARS
153 185
154/** 186/**
155 * __atomic_add_unless - add unless the number is a given value 187 * __atomic_add_unless - add unless the number is a given value
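The SCOND_FAIL_RETRY_* pieces above add a bounded exponential backoff between LLOCK/SCOND attempts for the ARC_STAR_9000923308 workaround. A rough user-space model of the same idea, with a GCC __atomic compare-exchange standing in for LLOCK/SCOND (illustrative only, not the kernel code):

    #include <stdio.h>

    static void atomic_add_backoff(unsigned int *ctr, unsigned int i)
    {
            unsigned int delay = 1;

            for (;;) {
                    unsigned int old = __atomic_load_n(ctr, __ATOMIC_RELAXED);
                    unsigned int new = old + i;

                    /* stands in for "scond" succeeding */
                    if (__atomic_compare_exchange_n(ctr, &old, new, 0,
                                                    __ATOMIC_RELAXED,
                                                    __ATOMIC_RELAXED))
                            break;

                    /* "scond" failed: spin for a while, then double the delay */
                    for (volatile unsigned int t = delay; t; t--)
                            ;
                    delay <<= 1;
            }
    }

    int main(void)
    {
            unsigned int counter = 0;

            atomic_add_backoff(&counter, 3);
            printf("%u\n", counter);          /* prints 3 */
            return 0;
    }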
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 99fe118d3730..57c1f33844d4 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
50 * done for const @nr, but no code is generated due to gcc \ 50 * done for const @nr, but no code is generated due to gcc \
51 * const prop. \ 51 * const prop. \
52 */ \ 52 */ \
53 if (__builtin_constant_p(nr)) \ 53 nr &= 0x1f; \
54 nr &= 0x1f; \
55 \ 54 \
56 __asm__ __volatile__( \ 55 __asm__ __volatile__( \
57 "1: llock %0, [%1] \n" \ 56 "1: llock %0, [%1] \n" \
@@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
82 \ 81 \
83 m += nr >> 5; \ 82 m += nr >> 5; \
84 \ 83 \
85 if (__builtin_constant_p(nr)) \ 84 nr &= 0x1f; \
86 nr &= 0x1f; \
87 \ 85 \
88 /* \ 86 /* \
89 * Explicit full memory barrier needed before/after as \ 87 * Explicit full memory barrier needed before/after as \
@@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
129 unsigned long temp, flags; \ 127 unsigned long temp, flags; \
130 m += nr >> 5; \ 128 m += nr >> 5; \
131 \ 129 \
132 if (__builtin_constant_p(nr)) \
133 nr &= 0x1f; \
134 \
135 /* \ 130 /* \
136 * spin lock/unlock provide the needed smp_mb() before/after \ 131 * spin lock/unlock provide the needed smp_mb() before/after \
137 */ \ 132 */ \
138 bitops_lock(flags); \ 133 bitops_lock(flags); \
139 \ 134 \
140 temp = *m; \ 135 temp = *m; \
141 *m = temp c_op (1UL << nr); \ 136 *m = temp c_op (1UL << (nr & 0x1f)); \
142 \ 137 \
143 bitops_unlock(flags); \ 138 bitops_unlock(flags); \
144} 139}
@@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
149 unsigned long old, flags; \ 144 unsigned long old, flags; \
150 m += nr >> 5; \ 145 m += nr >> 5; \
151 \ 146 \
152 if (__builtin_constant_p(nr)) \
153 nr &= 0x1f; \
154 \
155 bitops_lock(flags); \ 147 bitops_lock(flags); \
156 \ 148 \
157 old = *m; \ 149 old = *m; \
158 *m = old c_op (1 << nr); \ 150 *m = old c_op (1UL << (nr & 0x1f)); \
159 \ 151 \
160 bitops_unlock(flags); \ 152 bitops_unlock(flags); \
161 \ 153 \
162 return (old & (1 << nr)) != 0; \ 154 return (old & (1UL << (nr & 0x1f))) != 0; \
163} 155}
164 156
165#endif /* CONFIG_ARC_HAS_LLSC */ 157#endif /* CONFIG_ARC_HAS_LLSC */
@@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \
174 unsigned long temp; \ 166 unsigned long temp; \
175 m += nr >> 5; \ 167 m += nr >> 5; \
176 \ 168 \
177 if (__builtin_constant_p(nr)) \
178 nr &= 0x1f; \
179 \
180 temp = *m; \ 169 temp = *m; \
181 *m = temp c_op (1UL << nr); \ 170 *m = temp c_op (1UL << (nr & 0x1f)); \
182} 171}
183 172
184#define __TEST_N_BIT_OP(op, c_op, asm_op) \ 173#define __TEST_N_BIT_OP(op, c_op, asm_op) \
@@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
187 unsigned long old; \ 176 unsigned long old; \
188 m += nr >> 5; \ 177 m += nr >> 5; \
189 \ 178 \
190 if (__builtin_constant_p(nr)) \
191 nr &= 0x1f; \
192 \
193 old = *m; \ 179 old = *m; \
194 *m = old c_op (1 << nr); \ 180 *m = old c_op (1UL << (nr & 0x1f)); \
195 \ 181 \
196 return (old & (1 << nr)) != 0; \ 182 return (old & (1UL << (nr & 0x1f))) != 0; \
197} 183}
198 184
199#define BIT_OPS(op, c_op, asm_op) \ 185#define BIT_OPS(op, c_op, asm_op) \
@@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
224 210
225 addr += nr >> 5; 211 addr += nr >> 5;
226 212
227 if (__builtin_constant_p(nr)) 213 mask = 1UL << (nr & 0x1f);
228 nr &= 0x1f;
229
230 mask = 1 << nr;
231 214
232 return ((mask & *addr) != 0); 215 return ((mask & *addr) != 0);
233} 216}
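With the change above, the bit number is masked to the word width on every path rather than only for compile-time constants. All of these helpers rely on the same word-index/mask arithmetic, sketched stand-alone here (32-bit words assumed, as on ARC; not the kernel macros themselves):

    #include <stdio.h>

    static int test_bit_model(unsigned int nr, const unsigned long *addr)
    {
            unsigned long mask;

            addr += nr >> 5;                  /* word holding the bit */
            mask = 1UL << (nr & 0x1f);        /* bit within that word */
            return (*addr & mask) != 0;
    }

    int main(void)
    {
            unsigned long map[2] = { 0, 0 };

            map[1] |= 1UL << 8;               /* set bit 40 by hand */
            printf("%d %d\n", test_bit_model(40, map), test_bit_model(41, map));
            /* prints "1 0": bit 40 lives in word 1, position 8 */
            return 0;
    }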
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 05b5aaf5b0f9..70cfe16b742d 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -16,12 +16,40 @@
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <asm/errno.h> 17#include <asm/errno.h>
18 18
19#ifdef CONFIG_ARC_HAS_LLSC
20
21#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
22 \
23 __asm__ __volatile__( \
24 "1: llock %1, [%2] \n" \
25 insn "\n" \
26 "2: scond %0, [%2] \n" \
27 " bnz 1b \n" \
28 " mov %0, 0 \n" \
29 "3: \n" \
30 " .section .fixup,\"ax\" \n" \
31 " .align 4 \n" \
32 "4: mov %0, %4 \n" \
33 " b 3b \n" \
34 " .previous \n" \
35 " .section __ex_table,\"a\" \n" \
36 " .align 4 \n" \
37 " .word 1b, 4b \n" \
38 " .word 2b, 4b \n" \
39 " .previous \n" \
40 \
41 : "=&r" (ret), "=&r" (oldval) \
42 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
43 : "cc", "memory")
44
45#else /* !CONFIG_ARC_HAS_LLSC */
46
19#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\ 47#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
20 \ 48 \
21 __asm__ __volatile__( \ 49 __asm__ __volatile__( \
22 "1: ld %1, [%2] \n" \ 50 "1: ld %1, [%2] \n" \
23 insn "\n" \ 51 insn "\n" \
24 "2: st %0, [%2] \n" \ 52 "2: st %0, [%2] \n" \
25 " mov %0, 0 \n" \ 53 " mov %0, 0 \n" \
26 "3: \n" \ 54 "3: \n" \
27 " .section .fixup,\"ax\" \n" \ 55 " .section .fixup,\"ax\" \n" \
@@ -39,6 +67,8 @@
39 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \ 67 : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
40 : "cc", "memory") 68 : "cc", "memory")
41 69
70#endif
71
42static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 72static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
43{ 73{
44 int op = (encoded_op >> 28) & 7; 74 int op = (encoded_op >> 28) & 7;
@@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
123 153
124 pagefault_disable(); 154 pagefault_disable();
125 155
126 /* TBD : can use llock/scond */
127 __asm__ __volatile__( 156 __asm__ __volatile__(
128 "1: ld %0, [%3] \n" 157#ifdef CONFIG_ARC_HAS_LLSC
129 " brne %0, %1, 3f \n" 158 "1: llock %0, [%3] \n"
130 "2: st %2, [%3] \n" 159 " brne %0, %1, 3f \n"
160 "2: scond %2, [%3] \n"
161 " bnz 1b \n"
162#else
163 "1: ld %0, [%3] \n"
164 " brne %0, %1, 3f \n"
165 "2: st %2, [%3] \n"
166#endif
131 "3: \n" 167 "3: \n"
132 " .section .fixup,\"ax\" \n" 168 " .section .fixup,\"ax\" \n"
133 "4: mov %0, %4 \n" 169 "4: mov %0, %4 \n"
diff --git a/arch/arc/include/asm/mm-arch-hooks.h b/arch/arc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index c37541c5f8ba..000000000000
--- a/arch/arc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ARC_MM_ARCH_HOOKS_H
13#define _ASM_ARC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ARC_MM_ARCH_HOOKS_H */
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91755972b9a2..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
20struct pt_regs { 20struct pt_regs {
21 21
22 /* Real registers */ 22 /* Real registers */
23 long bta; /* bta_l1, bta_l2, erbta */ 23 unsigned long bta; /* bta_l1, bta_l2, erbta */
24 24
25 long lp_start, lp_end, lp_count; 25 unsigned long lp_start, lp_end, lp_count;
26 26
27 long status32; /* status32_l1, status32_l2, erstatus */ 27 unsigned long status32; /* status32_l1, status32_l2, erstatus */
28 long ret; /* ilink1, ilink2 or eret */ 28 unsigned long ret; /* ilink1, ilink2 or eret */
29 long blink; 29 unsigned long blink;
30 long fp; 30 unsigned long fp;
31 long r26; /* gp */ 31 unsigned long r26; /* gp */
32 32
33 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 33 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
34 34
35 long sp; /* user/kernel sp depending on where we came from */ 35 unsigned long sp; /* User/Kernel depending on where we came from */
36 long orig_r0; 36 unsigned long orig_r0;
37 37
38 /* 38 /*
39 * To distinguish bet excp, syscall, irq 39 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
55 unsigned long event; 55 unsigned long event;
56 }; 56 };
57 57
58 long user_r25; 58 unsigned long user_r25;
59}; 59};
60#else 60#else
61 61
62struct pt_regs { 62struct pt_regs {
63 63
64 long orig_r0; 64 unsigned long orig_r0;
65 65
66 union { 66 union {
67 struct { 67 struct {
@@ -76,26 +76,26 @@ struct pt_regs {
76 unsigned long event; 76 unsigned long event;
77 }; 77 };
78 78
79 long bta; /* bta_l1, bta_l2, erbta */ 79 unsigned long bta; /* bta_l1, bta_l2, erbta */
80 80
81 long user_r25; 81 unsigned long user_r25;
82 82
83 long r26; /* gp */ 83 unsigned long r26; /* gp */
84 long fp; 84 unsigned long fp;
85 long sp; /* user/kernel sp depending on where we came from */ 85 unsigned long sp; /* user/kernel sp depending on where we came from */
86 86
87 long r12; 87 unsigned long r12;
88 88
89 /*------- Below list auto saved by h/w -----------*/ 89 /*------- Below list auto saved by h/w -----------*/
90 long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; 90 unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
91 91
92 long blink; 92 unsigned long blink;
93 long lp_end, lp_start, lp_count; 93 unsigned long lp_end, lp_start, lp_count;
94 94
95 long ei, ldi, jli; 95 unsigned long ei, ldi, jli;
96 96
97 long ret; 97 unsigned long ret;
98 long status32; 98 unsigned long status32;
99}; 99};
100 100
101#endif 101#endif
@@ -103,7 +103,7 @@ struct pt_regs {
103/* Callee saved registers - need to be saved only when you are scheduled out */ 103/* Callee saved registers - need to be saved only when you are scheduled out */
104 104
105struct callee_regs { 105struct callee_regs {
106 long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; 106 unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
107}; 107};
108 108
109#define instruction_pointer(regs) ((regs)->ret) 109#define instruction_pointer(regs) ((regs)->ret)
@@ -142,7 +142,7 @@ struct callee_regs {
142 142
143static inline long regs_return_value(struct pt_regs *regs) 143static inline long regs_return_value(struct pt_regs *regs)
144{ 144{
145 return regs->r0; 145 return (long)regs->r0;
146} 146}
147 147
148#endif /* !__ASSEMBLY__ */ 148#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
18#define arch_spin_unlock_wait(x) \ 18#define arch_spin_unlock_wait(x) \
19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) 19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
20 20
21#ifdef CONFIG_ARC_HAS_LLSC
22
23/*
24 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
25 */
26#ifndef CONFIG_ARC_STAR_9000923308
27
21static inline void arch_spin_lock(arch_spinlock_t *lock) 28static inline void arch_spin_lock(arch_spinlock_t *lock)
22{ 29{
23 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 30 unsigned int val;
31
32 smp_mb();
33
34 __asm__ __volatile__(
35 "1: llock %[val], [%[slock]] \n"
36 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
37 " scond %[LOCKED], [%[slock]] \n" /* acquire */
38 " bnz 1b \n"
39 " \n"
40 : [val] "=&r" (val)
41 : [slock] "r" (&(lock->slock)),
42 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
43 : "memory", "cc");
44
45 smp_mb();
46}
47
48/* 1 - lock taken successfully */
49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{
51 unsigned int val, got_it = 0;
52
53 smp_mb();
54
55 __asm__ __volatile__(
56 "1: llock %[val], [%[slock]] \n"
57 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
58 " scond %[LOCKED], [%[slock]] \n" /* acquire */
59 " bnz 1b \n"
60 " mov %[got_it], 1 \n"
61 "4: \n"
62 " \n"
63 : [val] "=&r" (val),
64 [got_it] "+&r" (got_it)
65 : [slock] "r" (&(lock->slock)),
66 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
67 : "memory", "cc");
68
69 smp_mb();
70
71 return got_it;
72}
73
74static inline void arch_spin_unlock(arch_spinlock_t *lock)
75{
76 smp_mb();
77
78 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
79
80 smp_mb();
81}
82
83/*
84 * Read-write spinlocks, allowing multiple readers but only one writer.
85 * Unfair locking as Writers could be starved indefinitely by Reader(s)
86 */
87
88static inline void arch_read_lock(arch_rwlock_t *rw)
89{
90 unsigned int val;
91
92 smp_mb();
93
94 /*
95 * zero means writer holds the lock exclusively, deny Reader.
96 * Otherwise grant lock to first/subseq reader
97 *
98 * if (rw->counter > 0) {
99 * rw->counter--;
100 * ret = 1;
101 * }
102 */
103
104 __asm__ __volatile__(
105 "1: llock %[val], [%[rwlock]] \n"
106 " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
107 " sub %[val], %[val], 1 \n" /* reader lock */
108 " scond %[val], [%[rwlock]] \n"
109 " bnz 1b \n"
110 " \n"
111 : [val] "=&r" (val)
112 : [rwlock] "r" (&(rw->counter)),
113 [WR_LOCKED] "ir" (0)
114 : "memory", "cc");
115
116 smp_mb();
117}
118
119/* 1 - lock taken successfully */
120static inline int arch_read_trylock(arch_rwlock_t *rw)
121{
122 unsigned int val, got_it = 0;
123
124 smp_mb();
125
126 __asm__ __volatile__(
127 "1: llock %[val], [%[rwlock]] \n"
128 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
129 " sub %[val], %[val], 1 \n" /* counter-- */
130 " scond %[val], [%[rwlock]] \n"
131 " bnz 1b \n" /* retry if collided with someone */
132 " mov %[got_it], 1 \n"
133 " \n"
134 "4: ; --- done --- \n"
135
136 : [val] "=&r" (val),
137 [got_it] "+&r" (got_it)
138 : [rwlock] "r" (&(rw->counter)),
139 [WR_LOCKED] "ir" (0)
140 : "memory", "cc");
141
142 smp_mb();
143
144 return got_it;
145}
146
147static inline void arch_write_lock(arch_rwlock_t *rw)
148{
149 unsigned int val;
150
151 smp_mb();
152
153 /*
154 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
155 * deny writer. Otherwise if unlocked grant to writer
156 * Hence the claim that Linux rwlocks are unfair to writers.
157 * (can be starved for an indefinite time by readers).
158 *
159 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
160 * rw->counter = 0;
161 * ret = 1;
162 * }
163 */
164
165 __asm__ __volatile__(
166 "1: llock %[val], [%[rwlock]] \n"
167 " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
168 " mov %[val], %[WR_LOCKED] \n"
169 " scond %[val], [%[rwlock]] \n"
170 " bnz 1b \n"
171 " \n"
172 : [val] "=&r" (val)
173 : [rwlock] "r" (&(rw->counter)),
174 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
175 [WR_LOCKED] "ir" (0)
176 : "memory", "cc");
177
178 smp_mb();
179}
180
181/* 1 - lock taken successfully */
182static inline int arch_write_trylock(arch_rwlock_t *rw)
183{
184 unsigned int val, got_it = 0;
185
186 smp_mb();
187
188 __asm__ __volatile__(
189 "1: llock %[val], [%[rwlock]] \n"
190 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
191 " mov %[val], %[WR_LOCKED] \n"
192 " scond %[val], [%[rwlock]] \n"
193 " bnz 1b \n" /* retry if collided with someone */
194 " mov %[got_it], 1 \n"
195 " \n"
196 "4: ; --- done --- \n"
197
198 : [val] "=&r" (val),
199 [got_it] "+&r" (got_it)
200 : [rwlock] "r" (&(rw->counter)),
201 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
202 [WR_LOCKED] "ir" (0)
203 : "memory", "cc");
204
205 smp_mb();
206
207 return got_it;
208}
209
210static inline void arch_read_unlock(arch_rwlock_t *rw)
211{
212 unsigned int val;
213
214 smp_mb();
215
216 /*
217 * rw->counter++;
218 */
219 __asm__ __volatile__(
220 "1: llock %[val], [%[rwlock]] \n"
221 " add %[val], %[val], 1 \n"
222 " scond %[val], [%[rwlock]] \n"
223 " bnz 1b \n"
224 " \n"
225 : [val] "=&r" (val)
226 : [rwlock] "r" (&(rw->counter))
227 : "memory", "cc");
228
229 smp_mb();
230}
231
232static inline void arch_write_unlock(arch_rwlock_t *rw)
233{
234 smp_mb();
235
236 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
237
238 smp_mb();
239}
240
241#else /* CONFIG_ARC_STAR_9000923308 */
242
243/*
244 * HS38x4 could get into an LLOCK/SCOND livelock in case of multiple overlapping
245 * coherency transactions in the SCU. The exclusive line state keeps rotating
246 * among contending cores, leading to a never-ending cycle. So break the cycle
247 * by deferring the retry of a failed exclusive access (SCOND). The actual delay
248 * needed is a function of the number of contending cores as well as the unrelated
249 * coherency traffic from other cores. To keep the code simple, start off with a
250 * small delay of 1, which suffices in most cases, and double the delay on each
251 * contention. Eventually the delay is large enough that the coherency pipeline
252 * drains, and a subsequent exclusive access succeeds.
253 */
254
255#define SCOND_FAIL_RETRY_VAR_DEF \
256 unsigned int delay, tmp; \
257
258#define SCOND_FAIL_RETRY_ASM \
259 " ; --- scond fail delay --- \n" \
260 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
261 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
262 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
263 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
264 " b 1b \n" /* start over */ \
265 " \n" \
266 "4: ; --- done --- \n" \
267
268#define SCOND_FAIL_RETRY_VARS \
269 ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
270
271static inline void arch_spin_lock(arch_spinlock_t *lock)
272{
273 unsigned int val;
274 SCOND_FAIL_RETRY_VAR_DEF;
275
276 smp_mb();
277
278 __asm__ __volatile__(
279 "0: mov %[delay], 1 \n"
280 "1: llock %[val], [%[slock]] \n"
281 " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
282 " scond %[LOCKED], [%[slock]] \n" /* acquire */
283 " bz 4f \n" /* done */
284 " \n"
285 SCOND_FAIL_RETRY_ASM
286
287 : [val] "=&r" (val)
288 SCOND_FAIL_RETRY_VARS
289 : [slock] "r" (&(lock->slock)),
290 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
291 : "memory", "cc");
292
293 smp_mb();
294}
295
296/* 1 - lock taken successfully */
297static inline int arch_spin_trylock(arch_spinlock_t *lock)
298{
299 unsigned int val, got_it = 0;
300 SCOND_FAIL_RETRY_VAR_DEF;
301
302 smp_mb();
303
304 __asm__ __volatile__(
305 "0: mov %[delay], 1 \n"
306 "1: llock %[val], [%[slock]] \n"
307 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
308 " scond %[LOCKED], [%[slock]] \n" /* acquire */
309 " bz.d 4f \n"
310 " mov.z %[got_it], 1 \n" /* got it */
311 " \n"
312 SCOND_FAIL_RETRY_ASM
313
314 : [val] "=&r" (val),
315 [got_it] "+&r" (got_it)
316 SCOND_FAIL_RETRY_VARS
317 : [slock] "r" (&(lock->slock)),
318 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
319 : "memory", "cc");
320
321 smp_mb();
322
323 return got_it;
324}
325
326static inline void arch_spin_unlock(arch_spinlock_t *lock)
327{
328 smp_mb();
329
330 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
331
332 smp_mb();
333}
334
335/*
336 * Read-write spinlocks, allowing multiple readers but only one writer.
337 * Unfair locking as Writers could be starved indefinitely by Reader(s)
338 */
339
340static inline void arch_read_lock(arch_rwlock_t *rw)
341{
342 unsigned int val;
343 SCOND_FAIL_RETRY_VAR_DEF;
344
345 smp_mb();
346
347 /*
348 * zero means writer holds the lock exclusively, deny Reader.
349 * Otherwise grant lock to first/subseq reader
350 *
351 * if (rw->counter > 0) {
352 * rw->counter--;
353 * ret = 1;
354 * }
355 */
356
357 __asm__ __volatile__(
358 "0: mov %[delay], 1 \n"
359 "1: llock %[val], [%[rwlock]] \n"
360 " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
361 " sub %[val], %[val], 1 \n" /* reader lock */
362 " scond %[val], [%[rwlock]] \n"
363 " bz 4f \n" /* done */
364 " \n"
365 SCOND_FAIL_RETRY_ASM
366
367 : [val] "=&r" (val)
368 SCOND_FAIL_RETRY_VARS
369 : [rwlock] "r" (&(rw->counter)),
370 [WR_LOCKED] "ir" (0)
371 : "memory", "cc");
372
373 smp_mb();
374}
375
376/* 1 - lock taken successfully */
377static inline int arch_read_trylock(arch_rwlock_t *rw)
378{
379 unsigned int val, got_it = 0;
380 SCOND_FAIL_RETRY_VAR_DEF;
381
382 smp_mb();
383
384 __asm__ __volatile__(
385 "0: mov %[delay], 1 \n"
386 "1: llock %[val], [%[rwlock]] \n"
387 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
388 " sub %[val], %[val], 1 \n" /* counter-- */
389 " scond %[val], [%[rwlock]] \n"
390 " bz.d 4f \n"
391 " mov.z %[got_it], 1 \n" /* got it */
392 " \n"
393 SCOND_FAIL_RETRY_ASM
394
395 : [val] "=&r" (val),
396 [got_it] "+&r" (got_it)
397 SCOND_FAIL_RETRY_VARS
398 : [rwlock] "r" (&(rw->counter)),
399 [WR_LOCKED] "ir" (0)
400 : "memory", "cc");
401
402 smp_mb();
403
404 return got_it;
405}
406
407static inline void arch_write_lock(arch_rwlock_t *rw)
408{
409 unsigned int val;
410 SCOND_FAIL_RETRY_VAR_DEF;
411
412 smp_mb();
413
414 /*
415 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
416 * deny writer. Otherwise if unlocked grant to writer
417 * Hence the claim that Linux rwlocks are unfair to writers.
418 * (can be starved for an indefinite time by readers).
419 *
420 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
421 * rw->counter = 0;
422 * ret = 1;
423 * }
424 */
425
426 __asm__ __volatile__(
427 "0: mov %[delay], 1 \n"
428 "1: llock %[val], [%[rwlock]] \n"
429 " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
430 " mov %[val], %[WR_LOCKED] \n"
431 " scond %[val], [%[rwlock]] \n"
432 " bz 4f \n"
433 " \n"
434 SCOND_FAIL_RETRY_ASM
435
436 : [val] "=&r" (val)
437 SCOND_FAIL_RETRY_VARS
438 : [rwlock] "r" (&(rw->counter)),
439 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
440 [WR_LOCKED] "ir" (0)
441 : "memory", "cc");
442
443 smp_mb();
444}
445
446/* 1 - lock taken successfully */
447static inline int arch_write_trylock(arch_rwlock_t *rw)
448{
449 unsigned int val, got_it = 0;
450 SCOND_FAIL_RETRY_VAR_DEF;
451
452 smp_mb();
453
454 __asm__ __volatile__(
455 "0: mov %[delay], 1 \n"
456 "1: llock %[val], [%[rwlock]] \n"
457 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
458 " mov %[val], %[WR_LOCKED] \n"
459 " scond %[val], [%[rwlock]] \n"
460 " bz.d 4f \n"
461 " mov.z %[got_it], 1 \n" /* got it */
462 " \n"
463 SCOND_FAIL_RETRY_ASM
464
465 : [val] "=&r" (val),
466 [got_it] "+&r" (got_it)
467 SCOND_FAIL_RETRY_VARS
468 : [rwlock] "r" (&(rw->counter)),
469 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
470 [WR_LOCKED] "ir" (0)
471 : "memory", "cc");
472
473 smp_mb();
474
475 return got_it;
476}
477
478static inline void arch_read_unlock(arch_rwlock_t *rw)
479{
480 unsigned int val;
481
482 smp_mb();
483
484 /*
485 * rw->counter++;
486 */
487 __asm__ __volatile__(
488 "1: llock %[val], [%[rwlock]] \n"
489 " add %[val], %[val], 1 \n"
490 " scond %[val], [%[rwlock]] \n"
491 " bnz 1b \n"
492 " \n"
493 : [val] "=&r" (val)
494 : [rwlock] "r" (&(rw->counter))
495 : "memory", "cc");
496
497 smp_mb();
498}
499
500static inline void arch_write_unlock(arch_rwlock_t *rw)
501{
502 unsigned int val;
503
504 smp_mb();
505
506 /*
507 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
508 */
509 __asm__ __volatile__(
510 "1: llock %[val], [%[rwlock]] \n"
511 " scond %[UNLOCKED], [%[rwlock]]\n"
512 " bnz 1b \n"
513 " \n"
514 : [val] "=&r" (val)
515 : [rwlock] "r" (&(rw->counter)),
516 [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
517 : "memory", "cc");
518
519 smp_mb();
520}
521
522#undef SCOND_FAIL_RETRY_VAR_DEF
523#undef SCOND_FAIL_RETRY_ASM
524#undef SCOND_FAIL_RETRY_VARS
525
526#endif /* CONFIG_ARC_STAR_9000923308 */
527
528#else /* !CONFIG_ARC_HAS_LLSC */
529
530static inline void arch_spin_lock(arch_spinlock_t *lock)
531{
532 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
24 533
25 /* 534 /*
26 * This smp_mb() is technically superfluous, we only need the one 535 * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
33 __asm__ __volatile__( 542 __asm__ __volatile__(
34 "1: ex %0, [%1] \n" 543 "1: ex %0, [%1] \n"
35 " breq %0, %2, 1b \n" 544 " breq %0, %2, 1b \n"
36 : "+&r" (tmp) 545 : "+&r" (val)
37 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) 546 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
38 : "memory"); 547 : "memory");
39 548
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
48 smp_mb(); 557 smp_mb();
49} 558}
50 559
560/* 1 - lock taken successfully */
51static inline int arch_spin_trylock(arch_spinlock_t *lock) 561static inline int arch_spin_trylock(arch_spinlock_t *lock)
52{ 562{
53 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 563 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
54 564
55 smp_mb(); 565 smp_mb();
56 566
57 __asm__ __volatile__( 567 __asm__ __volatile__(
58 "1: ex %0, [%1] \n" 568 "1: ex %0, [%1] \n"
59 : "+r" (tmp) 569 : "+r" (val)
60 : "r"(&(lock->slock)) 570 : "r"(&(lock->slock))
61 : "memory"); 571 : "memory");
62 572
63 smp_mb(); 573 smp_mb();
64 574
65 return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); 575 return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
66} 576}
67 577
68static inline void arch_spin_unlock(arch_spinlock_t *lock) 578static inline void arch_spin_unlock(arch_spinlock_t *lock)
69{ 579{
70 unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; 580 unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
71 581
72 /* 582 /*
73 * RELEASE barrier: given the instructions avail on ARCv2, full barrier 583 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
77 587
78 __asm__ __volatile__( 588 __asm__ __volatile__(
79 " ex %0, [%1] \n" 589 " ex %0, [%1] \n"
80 : "+r" (tmp) 590 : "+r" (val)
81 : "r"(&(lock->slock)) 591 : "r"(&(lock->slock))
82 : "memory"); 592 : "memory");
83 593
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
90 600
91/* 601/*
92 * Read-write spinlocks, allowing multiple readers but only one writer. 602 * Read-write spinlocks, allowing multiple readers but only one writer.
603 * Unfair locking as Writers could be starved indefinitely by Reader(s)
93 * 604 *
94 * The spinlock itself is contained in @counter and access to it is 605 * The spinlock itself is contained in @counter and access to it is
95 * serialized with @lock_mutex. 606 * serialized with @lock_mutex.
96 *
97 * Unfair locking as Writers could be starved indefinitely by Reader(s)
98 */ 607 */
99 608
100/* Would read_trylock() succeed? */
101#define arch_read_can_lock(x) ((x)->counter > 0)
102
103/* Would write_trylock() succeed? */
104#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
105
106/* 1 - lock taken successfully */ 609/* 1 - lock taken successfully */
107static inline int arch_read_trylock(arch_rwlock_t *rw) 610static inline int arch_read_trylock(arch_rwlock_t *rw)
108{ 611{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
173 arch_spin_unlock(&(rw->lock_mutex)); 676 arch_spin_unlock(&(rw->lock_mutex));
174} 677}
175 678
679#endif
680
681#define arch_read_can_lock(x) ((x)->counter > 0)
682#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
683
176#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 684#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
177#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 685#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
178 686
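The SCOND_FAIL_RETRY_* macros above implement the STAR 9000923308 workaround as exponential backoff: start with a delay of 1, busy-wait that many iterations after a failed scond, double the delay (rol) and retry. A simplified user-space sketch of that control flow, where a C11 compare-exchange stands in for llock/scond; unlike the real code, this sketch also backs off when the lock is merely observed held and never resets the delay:

#include <stdatomic.h>
#include <stdio.h>

#define SPIN_UNLOCKED	0u
#define SPIN_LOCKED	1u

/* Exponential-backoff acquire loop, sketched with a C11 CAS. */
static void backoff_spin_lock(atomic_uint *slock)
{
	unsigned int delay = 1;

	for (;;) {
		unsigned int expected = SPIN_UNLOCKED;

		if (atomic_compare_exchange_weak(slock, &expected, SPIN_LOCKED))
			return;				/* "scond" succeeded */

		for (volatile unsigned int i = delay; i; i--)
			;				/* let coherency traffic drain */
		delay <<= 1;				/* "rol": double the delay */
	}
}

int main(void)
{
	atomic_uint lock = SPIN_UNLOCKED;

	backoff_spin_lock(&lock);
	printf("locked=%u\n", (unsigned int)atomic_load(&lock));
	return 0;
}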
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
26 */ 26 */
27typedef struct { 27typedef struct {
28 volatile unsigned int counter; 28 volatile unsigned int counter;
29#ifndef CONFIG_ARC_HAS_LLSC
29 arch_spinlock_t lock_mutex; 30 arch_spinlock_t lock_mutex;
31#endif
30} arch_rwlock_t; 32} arch_rwlock_t;
31 33
32#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 34#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
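The rwlock variants in both hunks use the same counter encoding: __ARCH_RW_LOCK_UNLOCKED__ (0x01000000) means the lock is free, each reader decrements the counter, and a writer sets it to 0. A small user-space sketch of the trylock semantics built on that encoding, with a CAS again standing in for llock/scond (an assumption of this sketch):

#include <stdatomic.h>
#include <stdio.h>

#define RW_UNLOCKED	0x01000000u	/* __ARCH_RW_LOCK_UNLOCKED__ */

/* counter == RW_UNLOCKED: free; 0 < counter < RW_UNLOCKED: reader-held;
 * counter == 0: writer-held. */
static int read_trylock(atomic_uint *counter)
{
	unsigned int val = atomic_load(counter);

	while (val > 0)			/* val == 0 means a writer holds it */
		if (atomic_compare_exchange_weak(counter, &val, val - 1))
			return 1;
	return 0;
}

static int write_trylock(atomic_uint *counter)
{
	unsigned int val = RW_UNLOCKED;	/* only an idle lock can be taken */

	return atomic_compare_exchange_strong(counter, &val, 0);
}

int main(void)
{
	atomic_uint counter = RW_UNLOCKED;

	printf("read_trylock:  %d\n", read_trylock(&counter));	/* 1 */
	printf("write_trylock: %d\n", write_trylock(&counter));	/* 0, a reader holds it */
	return 0;
}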
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
32*/ 32*/
33struct user_regs_struct { 33struct user_regs_struct {
34 34
35 long pad; 35 unsigned long pad;
36 struct { 36 struct {
37 long bta, lp_start, lp_end, lp_count; 37 unsigned long bta, lp_start, lp_end, lp_count;
38 long status32, ret, blink, fp, gp; 38 unsigned long status32, ret, blink, fp, gp;
39 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 39 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
40 long sp; 40 unsigned long sp;
41 } scratch; 41 } scratch;
42 long pad2; 42 unsigned long pad2;
43 struct { 43 struct {
44 long r25, r24, r23, r22, r21, r20; 44 unsigned long r25, r24, r23, r22, r21, r20;
45 long r19, r18, r17, r16, r15, r14, r13; 45 unsigned long r19, r18, r17, r16, r15, r14, r13;
46 } callee; 46 } callee;
47 long efa; /* break pt addr, for break points in delay slots */ 47 unsigned long efa; /* break pt addr, for break points in delay slots */
48 long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ 48 unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
49}; 49};
50#endif /* !__ASSEMBLY__ */ 50#endif /* !__ASSEMBLY__ */
51 51
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 6208c630abed..26c156827479 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -12,7 +12,6 @@
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/irqdomain.h> 13#include <linux/irqdomain.h>
14#include <linux/irqchip.h> 14#include <linux/irqchip.h>
15#include "../../drivers/irqchip/irqchip.h"
16#include <asm/irq.h> 15#include <asm/irq.h>
17 16
18/* 17/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index fcdddb631766..039fac30b5c1 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -12,7 +12,6 @@
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/irqdomain.h> 13#include <linux/irqdomain.h>
14#include <linux/irqchip.h> 14#include <linux/irqchip.h>
15#include "../../drivers/irqchip/irqchip.h"
16#include <asm/irq.h> 15#include <asm/irq.h>
17 16
18/* 17/*
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 30284e8de6ff..2fb86589054d 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -175,7 +175,6 @@ void mcip_init_early_smp(void)
175#include <linux/irqchip.h> 175#include <linux/irqchip.h>
176#include <linux/of.h> 176#include <linux/of.h>
177#include <linux/of_irq.h> 177#include <linux/of_irq.h>
178#include "../../drivers/irqchip/irqchip.h"
179 178
180/* 179/*
181 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core) 180 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
@@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data)
218 raw_spin_unlock_irqrestore(&mcip_lock, flags); 217 raw_spin_unlock_irqrestore(&mcip_lock, flags);
219} 218}
220 219
220#ifdef CONFIG_SMP
221static int 221static int
222idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f) 222idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
223 bool force)
223{ 224{
225 unsigned long flags;
226 cpumask_t online;
227
228 /* error out if no online CPU in @cpumask */
229 if (!cpumask_and(&online, cpumask, cpu_online_mask))
230 return -EINVAL;
231
232 raw_spin_lock_irqsave(&mcip_lock, flags);
233
234 idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
235 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
236
237 raw_spin_unlock_irqrestore(&mcip_lock, flags);
238
224 return IRQ_SET_MASK_OK; 239 return IRQ_SET_MASK_OK;
225} 240}
241#endif
226 242
227static struct irq_chip idu_irq_chip = { 243static struct irq_chip idu_irq_chip = {
228 .name = "MCIP IDU Intc", 244 .name = "MCIP IDU Intc",
@@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
330 if (!i) 346 if (!i)
331 idu_first_irq = irq; 347 idu_first_irq = irq;
332 348
333 irq_set_handler_data(irq, domain); 349 irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
334 irq_set_chained_handler(irq, idu_cascade_isr);
335 } 350 }
336 351
337 __mcip_cmd(CMD_IDU_ENABLE, 0); 352 __mcip_cmd(CMD_IDU_ENABLE, 0);
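The new idu_irq_set_affinity() first intersects the requested affinity mask with the online CPUs and rejects the request if nothing is left, then programs the IDU destination and round-robin distribution under mcip_lock. The mask arithmetic, sketched with plain bitmasks in place of cpumask_t (a simplification for this sketch):

#include <stdio.h>

int main(void)
{
	unsigned long requested = 0x6;	/* affinity request: CPUs 1 and 2 */
	unsigned long online    = 0x3;	/* online CPUs: 0 and 1 */
	unsigned long dest = requested & online;

	if (!dest) {
		printf("no online CPU in the requested mask: -EINVAL\n");
		return 1;
	}
	printf("IDU destination bits: 0x%lx\n", dest);	/* 0x2 -> route to CPU 1 */
	return 0;
}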
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a3d186211ed3..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
47 struct bcr_perip uncached_space; 47 struct bcr_perip uncached_space;
48 struct bcr_generic bcr; 48 struct bcr_generic bcr;
49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
50 unsigned long perip_space;
50 FIX_PTR(cpu); 51 FIX_PTR(cpu);
51 52
52 READ_BCR(AUX_IDENTITY, cpu->core); 53 READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
56 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 57 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
57 58
58 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 59 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
59 BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); 60 if (uncached_space.ver < 3)
61 perip_space = uncached_space.start << 24;
62 else
63 perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
64
65 BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
60 66
61 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); 67 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
62 68
@@ -142,17 +148,22 @@ static void read_arc_build_cfg_regs(void)
142} 148}
143 149
144static const struct cpuinfo_data arc_cpu_tbl[] = { 150static const struct cpuinfo_data arc_cpu_tbl[] = {
151#ifdef CONFIG_ISA_ARCOMPACT
145 { {0x20, "ARC 600" }, 0x2F}, 152 { {0x20, "ARC 600" }, 0x2F},
146 { {0x30, "ARC 700" }, 0x33}, 153 { {0x30, "ARC 700" }, 0x33},
147 { {0x34, "ARC 700 R4.10"}, 0x34}, 154 { {0x34, "ARC 700 R4.10"}, 0x34},
148 { {0x35, "ARC 700 R4.11"}, 0x35}, 155 { {0x35, "ARC 700 R4.11"}, 0x35},
149 { {0x50, "ARC HS38" }, 0x51}, 156#else
157 { {0x50, "ARC HS38 R2.0"}, 0x51},
158 { {0x52, "ARC HS38 R2.1"}, 0x52},
159#endif
150 { {0x00, NULL } } 160 { {0x00, NULL } }
151}; 161};
152 162
153#define IS_AVAIL1(v, str) ((v) ? str : "") 163#define IS_AVAIL1(v, s) ((v) ? s : "")
154#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ") 164#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
155#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg)) 165#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
166#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
156 167
157static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) 168static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
158{ 169{
@@ -226,7 +237,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
226 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); 237 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
227 } 238 }
228 n += scnprintf(buf + n, len - n, "%s", 239 n += scnprintf(buf + n, len - n, "%s",
229 IS_USED(CONFIG_ARC_HAS_HW_MPY)); 240 IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
230 } 241 }
231 242
232 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", 243 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
@@ -325,6 +336,10 @@ static void arc_chk_core_config(void)
325 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); 336 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
326 else if (!cpu->extn.fpu_dp && fpu_enabled) 337 else if (!cpu->extn.fpu_dp && fpu_enabled)
327 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); 338 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
339
340 if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
341 !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
342 panic("llock/scond livelock workaround missing\n");
328} 343}
329 344
330/* 345/*
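The IS_AVAIL1/IS_AVAIL2 rework separates "the hardware has the feature" from "this kernel config uses it": IS_AVAIL2() now yields two strings, the feature name and an optional "(not used) " tag. A stand-alone sketch of the expansion, with a trivial IS_ENABLED() stand-in (the real kernel macro is more involved; values here are illustrative):

#include <stdio.h>

/* Simplified stand-ins for the kernel macros. */
#define IS_ENABLED(cfg)		(cfg)
#define IS_AVAIL1(v, s)		((v) ? s : "")
#define IS_USED_RUN(v)		((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg)	IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg)	IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))

#define CONFIG_ARC_HAS_LLSC	0	/* pretend the Kconfig option is off */

int main(void)
{
	int hw_has_llsc = 1;		/* hardware reports the extension */

	/* Prints "llock/scond (not used) ": present in hardware, unused by config */
	printf("%s%s\n", IS_AVAIL2(hw_has_llsc, "llock/scond ", CONFIG_ARC_HAS_LLSC));
	return 0;
}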
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
203 return 0; 203 return 0;
204} 204}
205 205
206static void arc_clkevent_set_mode(enum clock_event_mode mode, 206static int arc_clkevent_set_periodic(struct clock_event_device *dev)
207 struct clock_event_device *dev)
208{ 207{
209 switch (mode) { 208 /*
210 case CLOCK_EVT_MODE_PERIODIC: 209 * At X Hz, 1 sec = 1000ms -> X cycles;
211 /* 210 * 10ms -> X / 100 cycles
212 * At X Hz, 1 sec = 1000ms -> X cycles; 211 */
213 * 10ms -> X / 100 cycles 212 arc_timer_event_setup(arc_get_core_freq() / HZ);
214 */ 213 return 0;
215 arc_timer_event_setup(arc_get_core_freq() / HZ);
216 break;
217 case CLOCK_EVT_MODE_ONESHOT:
218 break;
219 default:
220 break;
221 }
222
223 return;
224} 214}
225 215
226static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { 216static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
227 .name = "ARC Timer0", 217 .name = "ARC Timer0",
228 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 218 .features = CLOCK_EVT_FEAT_ONESHOT |
229 .mode = CLOCK_EVT_MODE_UNUSED, 219 CLOCK_EVT_FEAT_PERIODIC,
230 .rating = 300, 220 .rating = 300,
231 .irq = TIMER0_IRQ, /* hardwired, no need for resources */ 221 .irq = TIMER0_IRQ, /* hardwired, no need for resources */
232 .set_next_event = arc_clkevent_set_next_event, 222 .set_next_event = arc_clkevent_set_next_event,
233 .set_mode = arc_clkevent_set_mode, 223 .set_state_periodic = arc_clkevent_set_periodic,
234}; 224};
235 225
236static irqreturn_t timer_irq_handler(int irq, void *dev_id) 226static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
240 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() 230 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
241 */ 231 */
242 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 232 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
243 int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; 233 int irq_reenable = clockevent_state_periodic(evt);
244 234
245 /* 235 /*
246 * Any write to CTRL reg ACks the interrupt, we rewrite the 236 * Any write to CTRL reg ACks the interrupt, we rewrite the
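The clockevent driver is converted from the legacy set_mode callback to the per-state set_state_periodic hook; the periodic case simply programs core-clock-frequency / HZ cycles per tick, as the retained comment says. A one-line worked example of that arithmetic (the frequency value is illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned long core_freq = 90000000UL;	/* 90 MHz, illustrative only */
	unsigned long hz = 100;			/* CONFIG_HZ=100 -> 10 ms tick */

	printf("cycles per tick = %lu\n", core_freq / hz);	/* 900000 */
	return 0;
}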
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 807f7d61d7a7..a6f91e88ce36 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs)
58 58
59static void print_task_path_n_nm(struct task_struct *tsk, char *buf) 59static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
60{ 60{
61 struct path path;
62 char *path_nm = NULL; 61 char *path_nm = NULL;
63 struct mm_struct *mm; 62 struct mm_struct *mm;
64 struct file *exe_file; 63 struct file *exe_file;
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
206 ld.ab r6, [r1, 4] 206 ld.ab r6, [r1, 4]
207 prefetch [r1, 28] ;Prefetch the next read location 207 prefetch [r1, 28] ;Prefetch the next read location
208 ld.ab r8, [r1,4] 208 ld.ab r8, [r1,4]
209 prefetch [r3, 32] ;Prefetch the next write location 209 prefetchw [r3, 32] ;Prefetch the next write location
210 210
211 SHIFT_1 (r7, r6, 8) 211 SHIFT_1 (r7, r6, 8)
212 or r7, r7, r5 212 or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
10 10
11#undef PREALLOC_NOT_AVAIL 11#undef PREALLOC_NOT_AVAIL
12 12
13#ifdef PREALLOC_NOT_AVAIL
14#define PREWRITE(A,B) prefetchw [(A),(B)]
15#else
16#define PREWRITE(A,B) prealloc [(A),(B)]
17#endif
18
19ENTRY(memset) 13ENTRY(memset)
20 prefetchw [r0] ; Prefetch the write location 14 prefetchw [r0] ; Prefetch the write location
21 mov.f 0, r2 15 mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
51 45
52;;; Convert len to Dwords, unfold x8 46;;; Convert len to Dwords, unfold x8
53 lsr.f lp_count, lp_count, 6 47 lsr.f lp_count, lp_count, 6
48
54 lpnz @.Lset64bytes 49 lpnz @.Lset64bytes
55 ;; LOOP START 50 ;; LOOP START
56 PREWRITE(r3, 64) ;Prefetch the next write location 51#ifdef PREALLOC_NOT_AVAIL
52 prefetchw [r3, 64] ;Prefetch the next write location
53#else
54 prealloc [r3, 64]
55#endif
56#ifdef CONFIG_ARC_HAS_LL64
57 std.ab r4, [r3, 8] 57 std.ab r4, [r3, 8]
58 std.ab r4, [r3, 8] 58 std.ab r4, [r3, 8]
59 std.ab r4, [r3, 8] 59 std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
62 std.ab r4, [r3, 8] 62 std.ab r4, [r3, 8]
63 std.ab r4, [r3, 8] 63 std.ab r4, [r3, 8]
64 std.ab r4, [r3, 8] 64 std.ab r4, [r3, 8]
65#else
66 st.ab r4, [r3, 4]
67 st.ab r4, [r3, 4]
68 st.ab r4, [r3, 4]
69 st.ab r4, [r3, 4]
70 st.ab r4, [r3, 4]
71 st.ab r4, [r3, 4]
72 st.ab r4, [r3, 4]
73 st.ab r4, [r3, 4]
74 st.ab r4, [r3, 4]
75 st.ab r4, [r3, 4]
76 st.ab r4, [r3, 4]
77 st.ab r4, [r3, 4]
78 st.ab r4, [r3, 4]
79 st.ab r4, [r3, 4]
80 st.ab r4, [r3, 4]
81 st.ab r4, [r3, 4]
82#endif
65.Lset64bytes: 83.Lset64bytes:
66 84
67 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes 85 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
68 lpnz .Lset32bytes 86 lpnz .Lset32bytes
69 ;; LOOP START 87 ;; LOOP START
70 prefetchw [r3, 32] ;Prefetch the next write location 88 prefetchw [r3, 32] ;Prefetch the next write location
89#ifdef CONFIG_ARC_HAS_LL64
71 std.ab r4, [r3, 8] 90 std.ab r4, [r3, 8]
72 std.ab r4, [r3, 8] 91 std.ab r4, [r3, 8]
73 std.ab r4, [r3, 8] 92 std.ab r4, [r3, 8]
74 std.ab r4, [r3, 8] 93 std.ab r4, [r3, 8]
94#else
95 st.ab r4, [r3, 4]
96 st.ab r4, [r3, 4]
97 st.ab r4, [r3, 4]
98 st.ab r4, [r3, 4]
99 st.ab r4, [r3, 4]
100 st.ab r4, [r3, 4]
101 st.ab r4, [r3, 4]
102 st.ab r4, [r3, 4]
103#endif
75.Lset32bytes: 104.Lset32bytes:
76 105
77 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes 106 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index b29d62ed4f7e..1cd6695b6ab5 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
468noinline void slc_op(unsigned long paddr, unsigned long sz, const int op) 468noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
469{ 469{
470#ifdef CONFIG_ISA_ARCV2 470#ifdef CONFIG_ISA_ARCV2
471 /*
472 * The SLC is shared between all cores, so concurrent aux operations from
473 * multiple cores need to be serialized using a spinlock.
474 * Otherwise a concurrent operation can be silently ignored and/or the
475 * old/new operation can remain incomplete forever (lockup in the
476 * SLC_CTRL_BUSY loop below).
477 */
478 static DEFINE_SPINLOCK(lock);
471 unsigned long flags; 479 unsigned long flags;
472 unsigned int ctrl; 480 unsigned int ctrl;
473 481
474 local_irq_save(flags); 482 spin_lock_irqsave(&lock, flags);
475 483
476 /* 484 /*
477 * The Region Flush operation is specified by CTRL.RGN_OP[11..9] 485 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
@@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
504 512
505 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); 513 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
506 514
507 local_irq_restore(flags); 515 spin_unlock_irqrestore(&lock, flags);
508#endif 516#endif
509} 517}
510 518
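The cache.c hunk replaces local_irq_save() with a file-local spinlock because the SLC aux registers are shared by every core; only a lock visible to all CPUs can serialize the program-then-poll sequence. A user-space sketch of that pattern, with a pthread mutex standing in for the kernel spinlock and a plain variable for ARC_REG_SLC_CTRL (both assumptions of the sketch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int slc_ctrl;		/* stand-in for ARC_REG_SLC_CTRL */

static void slc_region_op(unsigned long paddr, unsigned long sz)
{
	(void)paddr;
	(void)sz;			/* only the locking pattern matters here */

	pthread_mutex_lock(&slc_lock);	/* one lock shared by every CPU */

	slc_ctrl |= 0x1;		/* program the region operation ... */
	/* ... write region start/end, then poll the BUSY bit ... */
	slc_ctrl &= ~0x1u;

	pthread_mutex_unlock(&slc_lock);
}

int main(void)
{
	slc_region_op(0x80000000UL, 4096);
	printf("ctrl=%u\n", slc_ctrl);
	return 0;
}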
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 74a637a1cfc4..57706a9c6948 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
60 60
61 /* This is kernel Virtual address (0x7000_0000 based) */ 61 /* This is kernel Virtual address (0x7000_0000 based) */
62 kvaddr = ioremap_nocache((unsigned long)paddr, size); 62 kvaddr = ioremap_nocache((unsigned long)paddr, size);
63 if (kvaddr != NULL) 63 if (kvaddr == NULL)
64 memset(kvaddr, 0, size); 64 return NULL;
65 65
66 /* This is bus address, platform dependent */ 66 /* This is bus address, platform dependent */
67 *dma_handle = (dma_addr_t)paddr; 67 *dma_handle = (dma_addr_t)paddr;
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
389 389
390static void __init axs103_early_init(void) 390static void __init axs103_early_init(void)
391{ 391{
392 /*
393 * The AXS103 SMP and QUAD configurations share a device tree, which
394 * defaults to 90 MHz. However, recent failures of the Quad config
395 * revealed P&R timing violations, so clamp it down to a safe 50 MHz.
396 * Instead of duplicating the defconfig/DT for SMP/QUAD, add a small hack.
397 *
398 * This hack is really hacky as of now. Fix it properly by getting the
399 * number of cores as the return value of the platform's early SMP callback.
400 */
401#ifdef CONFIG_ARC_MCIP
402 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
403 if (num_cores > 2)
404 arc_set_core_freq(50 * 1000000);
405#endif
406
392 switch (arc_get_core_freq()/1000000) { 407 switch (arc_get_core_freq()/1000000) {
393 case 33: 408 case 33:
394 axs103_set_freq(1, 1, 1); 409 axs103_set_freq(1, 1, 1);
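The axs103 early-init hack derives the core count from bits [21:16] of the MCIP build register and clamps the core clock to 50 MHz on quad configurations. The bit extraction, sketched with a made-up register value:

#include <stdio.h>

int main(void)
{
	unsigned int mcip_bcr = 0x00040001u;		/* made-up BCR value: 4 cores */
	unsigned int num_cores = (mcip_bcr >> 16) & 0x3F;

	if (num_cores > 2)
		printf("quad config: clamp core clock to 50 MHz\n");
	else
		printf("num_cores=%u, keep the DT default of 90 MHz\n", num_cores);
	return 0;
}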
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a..1c5021002fe4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1693,6 +1693,12 @@ config HIGHMEM
1693config HIGHPTE 1693config HIGHPTE
1694 bool "Allocate 2nd-level pagetables from highmem" 1694 bool "Allocate 2nd-level pagetables from highmem"
1695 depends on HIGHMEM 1695 depends on HIGHMEM
1696 help
1697 The VM uses one page of physical memory for each page table.
1698 For systems with a lot of processes, this can use a lot of
1699 precious low memory, eventually leading to low memory being
1700 consumed by page tables. Setting this option will allow
1701 user-space 2nd level page tables to reside in high memory.
1696 1702
1697config HW_PERF_EVENTS 1703config HW_PERF_EVENTS
1698 bool "Enable hardware performance counter support for perf events" 1704 bool "Enable hardware performance counter support for perf events"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1b157971366..a2e16f940394 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
1635 1635
1636config DEBUG_SET_MODULE_RONX 1636config DEBUG_SET_MODULE_RONX
1637 bool "Set loadable kernel module data as NX and text as RO" 1637 bool "Set loadable kernel module data as NX and text as RO"
1638 depends on MODULES 1638 depends on MODULES && MMU
1639 ---help--- 1639 ---help---
1640 This option helps catch unintended modifications to loadable 1640 This option helps catch unintended modifications to loadable
1641 kernel module's text and read-only data. It also prevents execution 1641 kernel module's text and read-only data. It also prevents execution
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
312 312
313PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) 313PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
314 314
315bootpImage uImage: zImage
316zImage: Image
317
315$(BOOT_TARGETS): vmlinux 318$(BOOT_TARGETS): vmlinux
316 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ 319 $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
317 320
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 901739fcb85a..5c42d259fa68 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,3 +80,7 @@
80 status = "okay"; 80 status = "okay";
81 }; 81 };
82}; 82};
83
84&rtc {
85 system-power-controller;
86};
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 0d35ab64641c..7106114c7464 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -74,6 +74,7 @@
74 audio_codec: tlv320aic3106@1b { 74 audio_codec: tlv320aic3106@1b {
75 compatible = "ti,tlv320aic3106"; 75 compatible = "ti,tlv320aic3106";
76 reg = <0x1b>; 76 reg = <0x1b>;
77 ai3x-micbias-vg = <0x2>;
77 }; 78 };
78 79
79 accel: lis331dlh@1d { 80 accel: lis331dlh@1d {
@@ -153,7 +154,7 @@
153 ti,audio-routing = 154 ti,audio-routing =
154 "Headphone Jack", "HPLOUT", 155 "Headphone Jack", "HPLOUT",
155 "Headphone Jack", "HPROUT", 156 "Headphone Jack", "HPROUT",
156 "LINE1L", "Line In"; 157 "MIC3L", "Mic3L Switch";
157}; 158};
158 159
159&mcasp0 { 160&mcasp0 {
@@ -438,41 +439,50 @@
438 regulators { 439 regulators {
439 dcdc1_reg: regulator@0 { 440 dcdc1_reg: regulator@0 {
440 /* VDD_1V8 system supply */ 441 /* VDD_1V8 system supply */
442 regulator-always-on;
441 }; 443 };
442 444
443 dcdc2_reg: regulator@1 { 445 dcdc2_reg: regulator@1 {
444 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */ 446 /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
445 regulator-name = "vdd_core"; 447 regulator-name = "vdd_core";
446 regulator-min-microvolt = <925000>; 448 regulator-min-microvolt = <925000>;
447 regulator-max-microvolt = <1325000>; 449 regulator-max-microvolt = <1150000>;
448 regulator-boot-on; 450 regulator-boot-on;
451 regulator-always-on;
449 }; 452 };
450 453
451 dcdc3_reg: regulator@2 { 454 dcdc3_reg: regulator@2 {
452 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */ 455 /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
453 regulator-name = "vdd_mpu"; 456 regulator-name = "vdd_mpu";
454 regulator-min-microvolt = <925000>; 457 regulator-min-microvolt = <925000>;
455 regulator-max-microvolt = <1150000>; 458 regulator-max-microvolt = <1325000>;
456 regulator-boot-on; 459 regulator-boot-on;
460 regulator-always-on;
457 }; 461 };
458 462
459 ldo1_reg: regulator@3 { 463 ldo1_reg: regulator@3 {
460 /* VRTC 1.8V always-on supply */ 464 /* VRTC 1.8V always-on supply */
465 regulator-name = "vrtc,vdds";
461 regulator-always-on; 466 regulator-always-on;
462 }; 467 };
463 468
464 ldo2_reg: regulator@4 { 469 ldo2_reg: regulator@4 {
465 /* 3.3V rail */ 470 /* 3.3V rail */
471 regulator-name = "vdd_3v3aux";
472 regulator-always-on;
466 }; 473 };
467 474
468 ldo3_reg: regulator@5 { 475 ldo3_reg: regulator@5 {
469 /* VDD_3V3A 3.3V rail */ 476 /* VDD_3V3A 3.3V rail */
477 regulator-name = "vdd_3v3a";
470 regulator-min-microvolt = <3300000>; 478 regulator-min-microvolt = <3300000>;
471 regulator-max-microvolt = <3300000>; 479 regulator-max-microvolt = <3300000>;
472 }; 480 };
473 481
474 ldo4_reg: regulator@6 { 482 ldo4_reg: regulator@6 {
475 /* VDD_3V3B 3.3V rail */ 483 /* VDD_3V3B 3.3V rail */
484 regulator-name = "vdd_3v3b";
485 regulator-always-on;
476 }; 486 };
477 }; 487 };
478}; 488};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index c80a3e233792..ade28c790f4b 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -132,6 +132,12 @@
132 }; 132 };
133 }; 133 };
134 134
135 emif: emif@4c000000 {
136 compatible = "ti,emif-am4372";
137 reg = <0x4c000000 0x1000000>;
138 ti,hwmods = "emif";
139 };
140
135 edma: edma@49000000 { 141 edma: edma@49000000 {
136 compatible = "ti,edma3"; 142 compatible = "ti,edma3";
137 ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; 143 ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
@@ -941,6 +947,7 @@
941 ti,hwmods = "dss_rfbi"; 947 ti,hwmods = "dss_rfbi";
942 clocks = <&disp_clk>; 948 clocks = <&disp_clk>;
943 clock-names = "fck"; 949 clock-names = "fck";
950 status = "disabled";
944 }; 951 };
945 }; 952 };
946 953
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index a42cc377a862..a63bf78191ea 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -605,6 +605,10 @@
605 phy-supply = <&ldousb_reg>; 605 phy-supply = <&ldousb_reg>;
606}; 606};
607 607
608&usb2_phy2 {
609 phy-supply = <&ldousb_reg>;
610};
611
608&usb1 { 612&usb1 {
609 dr_mode = "host"; 613 dr_mode = "host";
610 pinctrl-names = "default"; 614 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
index 5dfd3a44bf82..3e21311f9514 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
@@ -135,6 +135,1025 @@
135 compatible = "sirf,atlas7-ioc"; 135 compatible = "sirf,atlas7-ioc";
136 reg = <0x18880000 0x1000>, 136 reg = <0x18880000 0x1000>,
137 <0x10E40000 0x1000>; 137 <0x10E40000 0x1000>;
138
139 audio_ac97_pmx: audio_ac97@0 {
140 audio_ac97 {
141 groups = "audio_ac97_grp";
142 function = "audio_ac97";
143 };
144 };
145
146 audio_func_dbg_pmx: audio_func_dbg@0 {
147 audio_func_dbg {
148 groups = "audio_func_dbg_grp";
149 function = "audio_func_dbg";
150 };
151 };
152
153 audio_i2s_pmx: audio_i2s@0 {
154 audio_i2s {
155 groups = "audio_i2s_grp";
156 function = "audio_i2s";
157 };
158 };
159
160 audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
161 audio_i2s_2ch {
162 groups = "audio_i2s_2ch_grp";
163 function = "audio_i2s_2ch";
164 };
165 };
166
167 audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
168 audio_i2s_extclk {
169 groups = "audio_i2s_extclk_grp";
170 function = "audio_i2s_extclk";
171 };
172 };
173
174 audio_uart0_pmx: audio_uart0@0 {
175 audio_uart0 {
176 groups = "audio_uart0_grp";
177 function = "audio_uart0";
178 };
179 };
180
181 audio_uart1_pmx: audio_uart1@0 {
182 audio_uart1 {
183 groups = "audio_uart1_grp";
184 function = "audio_uart1";
185 };
186 };
187
188 audio_uart2_pmx0: audio_uart2@0 {
189 audio_uart2_0 {
190 groups = "audio_uart2_grp0";
191 function = "audio_uart2_m0";
192 };
193 };
194
195 audio_uart2_pmx1: audio_uart2@1 {
196 audio_uart2_1 {
197 groups = "audio_uart2_grp1";
198 function = "audio_uart2_m1";
199 };
200 };
201
202 c_can_trnsvr_pmx: c_can_trnsvr@0 {
203 c_can_trnsvr {
204 groups = "c_can_trnsvr_grp";
205 function = "c_can_trnsvr";
206 };
207 };
208
209 c0_can_pmx0: c0_can@0 {
210 c0_can_0 {
211 groups = "c0_can_grp0";
212 function = "c0_can_m0";
213 };
214 };
215
216 c0_can_pmx1: c0_can@1 {
217 c0_can_1 {
218 groups = "c0_can_grp1";
219 function = "c0_can_m1";
220 };
221 };
222
223 c1_can_pmx0: c1_can@0 {
224 c1_can_0 {
225 groups = "c1_can_grp0";
226 function = "c1_can_m0";
227 };
228 };
229
230 c1_can_pmx1: c1_can@1 {
231 c1_can_1 {
232 groups = "c1_can_grp1";
233 function = "c1_can_m1";
234 };
235 };
236
237 c1_can_pmx2: c1_can@2 {
238 c1_can_2 {
239 groups = "c1_can_grp2";
240 function = "c1_can_m2";
241 };
242 };
243
244 ca_audio_lpc_pmx: ca_audio_lpc@0 {
245 ca_audio_lpc {
246 groups = "ca_audio_lpc_grp";
247 function = "ca_audio_lpc";
248 };
249 };
250
251 ca_bt_lpc_pmx: ca_bt_lpc@0 {
252 ca_bt_lpc {
253 groups = "ca_bt_lpc_grp";
254 function = "ca_bt_lpc";
255 };
256 };
257
258 ca_coex_pmx: ca_coex@0 {
259 ca_coex {
260 groups = "ca_coex_grp";
261 function = "ca_coex";
262 };
263 };
264
265 ca_curator_lpc_pmx: ca_curator_lpc@0 {
266 ca_curator_lpc {
267 groups = "ca_curator_lpc_grp";
268 function = "ca_curator_lpc";
269 };
270 };
271
272 ca_pcm_debug_pmx: ca_pcm_debug@0 {
273 ca_pcm_debug {
274 groups = "ca_pcm_debug_grp";
275 function = "ca_pcm_debug";
276 };
277 };
278
279 ca_pio_pmx: ca_pio@0 {
280 ca_pio {
281 groups = "ca_pio_grp";
282 function = "ca_pio";
283 };
284 };
285
286 ca_sdio_debug_pmx: ca_sdio_debug@0 {
287 ca_sdio_debug {
288 groups = "ca_sdio_debug_grp";
289 function = "ca_sdio_debug";
290 };
291 };
292
293 ca_spi_pmx: ca_spi@0 {
294 ca_spi {
295 groups = "ca_spi_grp";
296 function = "ca_spi";
297 };
298 };
299
300 ca_trb_pmx: ca_trb@0 {
301 ca_trb {
302 groups = "ca_trb_grp";
303 function = "ca_trb";
304 };
305 };
306
307 ca_uart_debug_pmx: ca_uart_debug@0 {
308 ca_uart_debug {
309 groups = "ca_uart_debug_grp";
310 function = "ca_uart_debug";
311 };
312 };
313
314 clkc_pmx0: clkc@0 {
315 clkc_0 {
316 groups = "clkc_grp0";
317 function = "clkc_m0";
318 };
319 };
320
321 clkc_pmx1: clkc@1 {
322 clkc_1 {
323 groups = "clkc_grp1";
324 function = "clkc_m1";
325 };
326 };
327
328 gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
329 gn_gnss_i2c {
330 groups = "gn_gnss_i2c_grp";
331 function = "gn_gnss_i2c";
332 };
333 };
334
335 gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
336 gn_gnss_uart_nopause {
337 groups = "gn_gnss_uart_nopause_grp";
338 function = "gn_gnss_uart_nopause";
339 };
340 };
341
342 gn_gnss_uart_pmx: gn_gnss_uart@0 {
343 gn_gnss_uart {
344 groups = "gn_gnss_uart_grp";
345 function = "gn_gnss_uart";
346 };
347 };
348
349 gn_trg_spi_pmx0: gn_trg_spi@0 {
350 gn_trg_spi_0 {
351 groups = "gn_trg_spi_grp0";
352 function = "gn_trg_spi_m0";
353 };
354 };
355
356 gn_trg_spi_pmx1: gn_trg_spi@1 {
357 gn_trg_spi_1 {
358 groups = "gn_trg_spi_grp1";
359 function = "gn_trg_spi_m1";
360 };
361 };
362
363 cvbs_dbg_pmx: cvbs_dbg@0 {
364 cvbs_dbg {
365 groups = "cvbs_dbg_grp";
366 function = "cvbs_dbg";
367 };
368 };
369
370 cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
371 cvbs_dbg_test_0 {
372 groups = "cvbs_dbg_test_grp0";
373 function = "cvbs_dbg_test_m0";
374 };
375 };
376
377 cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
378 cvbs_dbg_test_1 {
379 groups = "cvbs_dbg_test_grp1";
380 function = "cvbs_dbg_test_m1";
381 };
382 };
383
384 cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
385 cvbs_dbg_test_2 {
386 groups = "cvbs_dbg_test_grp2";
387 function = "cvbs_dbg_test_m2";
388 };
389 };
390
391 cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
392 cvbs_dbg_test_3 {
393 groups = "cvbs_dbg_test_grp3";
394 function = "cvbs_dbg_test_m3";
395 };
396 };
397
398 cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
399 cvbs_dbg_test_4 {
400 groups = "cvbs_dbg_test_grp4";
401 function = "cvbs_dbg_test_m4";
402 };
403 };
404
405 cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
406 cvbs_dbg_test_5 {
407 groups = "cvbs_dbg_test_grp5";
408 function = "cvbs_dbg_test_m5";
409 };
410 };
411
412 cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
413 cvbs_dbg_test_6 {
414 groups = "cvbs_dbg_test_grp6";
415 function = "cvbs_dbg_test_m6";
416 };
417 };
418
419 cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
420 cvbs_dbg_test_7 {
421 groups = "cvbs_dbg_test_grp7";
422 function = "cvbs_dbg_test_m7";
423 };
424 };
425
426 cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
427 cvbs_dbg_test_8 {
428 groups = "cvbs_dbg_test_grp8";
429 function = "cvbs_dbg_test_m8";
430 };
431 };
432
433 cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
434 cvbs_dbg_test_9 {
435 groups = "cvbs_dbg_test_grp9";
436 function = "cvbs_dbg_test_m9";
437 };
438 };
439
440 cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
441 cvbs_dbg_test_10 {
442 groups = "cvbs_dbg_test_grp10";
443 function = "cvbs_dbg_test_m10";
444 };
445 };
446
447 cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
448 cvbs_dbg_test_11 {
449 groups = "cvbs_dbg_test_grp11";
450 function = "cvbs_dbg_test_m11";
451 };
452 };
453
454 cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
455 cvbs_dbg_test_12 {
456 groups = "cvbs_dbg_test_grp12";
457 function = "cvbs_dbg_test_m12";
458 };
459 };
460
461 cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
462 cvbs_dbg_test_13 {
463 groups = "cvbs_dbg_test_grp13";
464 function = "cvbs_dbg_test_m13";
465 };
466 };
467
468 cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
469 cvbs_dbg_test_14 {
470 groups = "cvbs_dbg_test_grp14";
471 function = "cvbs_dbg_test_m14";
472 };
473 };
474
475 cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
476 cvbs_dbg_test_15 {
477 groups = "cvbs_dbg_test_grp15";
478 function = "cvbs_dbg_test_m15";
479 };
480 };
481
482 gn_gnss_power_pmx: gn_gnss_power@0 {
483 gn_gnss_power {
484 groups = "gn_gnss_power_grp";
485 function = "gn_gnss_power";
486 };
487 };
488
489 gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
490 gn_gnss_sw_status {
491 groups = "gn_gnss_sw_status_grp";
492 function = "gn_gnss_sw_status";
493 };
494 };
495
496 gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
497 gn_gnss_eclk {
498 groups = "gn_gnss_eclk_grp";
499 function = "gn_gnss_eclk";
500 };
501 };
502
503 gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
504 gn_gnss_irq1_0 {
505 groups = "gn_gnss_irq1_grp0";
506 function = "gn_gnss_irq1_m0";
507 };
508 };
509
510 gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
511 gn_gnss_irq2_0 {
512 groups = "gn_gnss_irq2_grp0";
513 function = "gn_gnss_irq2_m0";
514 };
515 };
516
517 gn_gnss_tm_pmx: gn_gnss_tm@0 {
518 gn_gnss_tm {
519 groups = "gn_gnss_tm_grp";
520 function = "gn_gnss_tm";
521 };
522 };
523
524 gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
525 gn_gnss_tsync {
526 groups = "gn_gnss_tsync_grp";
527 function = "gn_gnss_tsync";
528 };
529 };
530
531 gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
532 gn_io_gnsssys_sw_cfg {
533 groups = "gn_io_gnsssys_sw_cfg_grp";
534 function = "gn_io_gnsssys_sw_cfg";
535 };
536 };
537
538 gn_trg_pmx0: gn_trg@0 {
539 gn_trg_0 {
540 groups = "gn_trg_grp0";
541 function = "gn_trg_m0";
542 };
543 };
544
545 gn_trg_pmx1: gn_trg@1 {
546 gn_trg_1 {
547 groups = "gn_trg_grp1";
548 function = "gn_trg_m1";
549 };
550 };
551
552 gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
553 gn_trg_shutdown_0 {
554 groups = "gn_trg_shutdown_grp0";
555 function = "gn_trg_shutdown_m0";
556 };
557 };
558
559 gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
560 gn_trg_shutdown_1 {
561 groups = "gn_trg_shutdown_grp1";
562 function = "gn_trg_shutdown_m1";
563 };
564 };
565
566 gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
567 gn_trg_shutdown_2 {
568 groups = "gn_trg_shutdown_grp2";
569 function = "gn_trg_shutdown_m2";
570 };
571 };
572
573 gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
574 gn_trg_shutdown_3 {
575 groups = "gn_trg_shutdown_grp3";
576 function = "gn_trg_shutdown_m3";
577 };
578 };
579
580 i2c0_pmx: i2c0@0 {
581 i2c0 {
582 groups = "i2c0_grp";
583 function = "i2c0";
584 };
585 };
586
587 i2c1_pmx: i2c1@0 {
588 i2c1 {
589 groups = "i2c1_grp";
590 function = "i2c1";
591 };
592 };
593
594 jtag_pmx0: jtag@0 {
595 jtag_0 {
596 groups = "jtag_grp0";
597 function = "jtag_m0";
598 };
599 };
600
601 ks_kas_spi_pmx0: ks_kas_spi@0 {
602 ks_kas_spi_0 {
603 groups = "ks_kas_spi_grp0";
604 function = "ks_kas_spi_m0";
605 };
606 };
607
608 ld_ldd_pmx: ld_ldd@0 {
609 ld_ldd {
610 groups = "ld_ldd_grp";
611 function = "ld_ldd";
612 };
613 };
614
615 ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
616 ld_ldd_16bit {
617 groups = "ld_ldd_16bit_grp";
618 function = "ld_ldd_16bit";
619 };
620 };
621
622 ld_ldd_fck_pmx: ld_ldd_fck@0 {
623 ld_ldd_fck {
624 groups = "ld_ldd_fck_grp";
625 function = "ld_ldd_fck";
626 };
627 };
628
629 ld_ldd_lck_pmx: ld_ldd_lck@0 {
630 ld_ldd_lck {
631 groups = "ld_ldd_lck_grp";
632 function = "ld_ldd_lck";
633 };
634 };
635
636 lr_lcdrom_pmx: lr_lcdrom@0 {
637 lr_lcdrom {
638 groups = "lr_lcdrom_grp";
639 function = "lr_lcdrom";
640 };
641 };
642
643 lvds_analog_pmx: lvds_analog@0 {
644 lvds_analog {
645 groups = "lvds_analog_grp";
646 function = "lvds_analog";
647 };
648 };
649
650 nd_df_pmx: nd_df@0 {
651 nd_df {
652 groups = "nd_df_grp";
653 function = "nd_df";
654 };
655 };
656
657 nd_df_nowp_pmx: nd_df_nowp@0 {
658 nd_df_nowp {
659 groups = "nd_df_nowp_grp";
660 function = "nd_df_nowp";
661 };
662 };
663
664 ps_pmx: ps@0 {
665 ps {
666 groups = "ps_grp";
667 function = "ps";
668 };
669 };
670
671 pwc_core_on_pmx: pwc_core_on@0 {
672 pwc_core_on {
673 groups = "pwc_core_on_grp";
674 function = "pwc_core_on";
675 };
676 };
677
678 pwc_ext_on_pmx: pwc_ext_on@0 {
679 pwc_ext_on {
680 groups = "pwc_ext_on_grp";
681 function = "pwc_ext_on";
682 };
683 };
684
685 pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
686 pwc_gpio3_clk {
687 groups = "pwc_gpio3_clk_grp";
688 function = "pwc_gpio3_clk";
689 };
690 };
691
692 pwc_io_on_pmx: pwc_io_on@0 {
693 pwc_io_on {
694 groups = "pwc_io_on_grp";
695 function = "pwc_io_on";
696 };
697 };
698
699 pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
700 pwc_lowbatt_b_0 {
701 groups = "pwc_lowbatt_b_grp0";
702 function = "pwc_lowbatt_b_m0";
703 };
704 };
705
706 pwc_mem_on_pmx: pwc_mem_on@0 {
707 pwc_mem_on {
708 groups = "pwc_mem_on_grp";
709 function = "pwc_mem_on";
710 };
711 };
712
713 pwc_on_key_b_pmx0: pwc_on_key_b@0 {
714 pwc_on_key_b_0 {
715 groups = "pwc_on_key_b_grp0";
716 function = "pwc_on_key_b_m0";
717 };
718 };
719
720 pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
721 pwc_wakeup_src0 {
722 groups = "pwc_wakeup_src0_grp";
723 function = "pwc_wakeup_src0";
724 };
725 };
726
727 pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
728 pwc_wakeup_src1 {
729 groups = "pwc_wakeup_src1_grp";
730 function = "pwc_wakeup_src1";
731 };
732 };
733
734 pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
735 pwc_wakeup_src2 {
736 groups = "pwc_wakeup_src2_grp";
737 function = "pwc_wakeup_src2";
738 };
739 };
740
741 pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
742 pwc_wakeup_src3 {
743 groups = "pwc_wakeup_src3_grp";
744 function = "pwc_wakeup_src3";
745 };
746 };
747
748 pw_cko0_pmx0: pw_cko0@0 {
749 pw_cko0_0 {
750 groups = "pw_cko0_grp0";
751 function = "pw_cko0_m0";
752 };
753 };
754
755 pw_cko0_pmx1: pw_cko0@1 {
756 pw_cko0_1 {
757 groups = "pw_cko0_grp1";
758 function = "pw_cko0_m1";
759 };
760 };
761
762 pw_cko0_pmx2: pw_cko0@2 {
763 pw_cko0_2 {
764 groups = "pw_cko0_grp2";
765 function = "pw_cko0_m2";
766 };
767 };
768
769 pw_cko1_pmx0: pw_cko1@0 {
770 pw_cko1_0 {
771 groups = "pw_cko1_grp0";
772 function = "pw_cko1_m0";
773 };
774 };
775
776 pw_cko1_pmx1: pw_cko1@1 {
777 pw_cko1_1 {
778 groups = "pw_cko1_grp1";
779 function = "pw_cko1_m1";
780 };
781 };
782
783 pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
784 pw_i2s01_clk_0 {
785 groups = "pw_i2s01_clk_grp0";
786 function = "pw_i2s01_clk_m0";
787 };
788 };
789
790 pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
791 pw_i2s01_clk_1 {
792 groups = "pw_i2s01_clk_grp1";
793 function = "pw_i2s01_clk_m1";
794 };
795 };
796
797 pw_pwm0_pmx: pw_pwm0@0 {
798 pw_pwm0 {
799 groups = "pw_pwm0_grp";
800 function = "pw_pwm0";
801 };
802 };
803
804 pw_pwm1_pmx: pw_pwm1@0 {
805 pw_pwm1 {
806 groups = "pw_pwm1_grp";
807 function = "pw_pwm1";
808 };
809 };
810
811 pw_pwm2_pmx0: pw_pwm2@0 {
812 pw_pwm2_0 {
813 groups = "pw_pwm2_grp0";
814 function = "pw_pwm2_m0";
815 };
816 };
817
818 pw_pwm2_pmx1: pw_pwm2@1 {
819 pw_pwm2_1 {
820 groups = "pw_pwm2_grp1";
821 function = "pw_pwm2_m1";
822 };
823 };
824
825 pw_pwm3_pmx0: pw_pwm3@0 {
826 pw_pwm3_0 {
827 groups = "pw_pwm3_grp0";
828 function = "pw_pwm3_m0";
829 };
830 };
831
832 pw_pwm3_pmx1: pw_pwm3@1 {
833 pw_pwm3_1 {
834 groups = "pw_pwm3_grp1";
835 function = "pw_pwm3_m1";
836 };
837 };
838
839 pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
840 pw_pwm_cpu_vol_0 {
841 groups = "pw_pwm_cpu_vol_grp0";
842 function = "pw_pwm_cpu_vol_m0";
843 };
844 };
845
846 pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
847 pw_pwm_cpu_vol_1 {
848 groups = "pw_pwm_cpu_vol_grp1";
849 function = "pw_pwm_cpu_vol_m1";
850 };
851 };
852
853 pw_backlight_pmx0: pw_backlight@0 {
854 pw_backlight_0 {
855 groups = "pw_backlight_grp0";
856 function = "pw_backlight_m0";
857 };
858 };
859
860 pw_backlight_pmx1: pw_backlight@1 {
861 pw_backlight_1 {
862 groups = "pw_backlight_grp1";
863 function = "pw_backlight_m1";
864 };
865 };
866
867 rg_eth_mac_pmx: rg_eth_mac@0 {
868 rg_eth_mac {
869 groups = "rg_eth_mac_grp";
870 function = "rg_eth_mac";
871 };
872 };
873
874 rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
875 rg_gmac_phy_intr_n {
876 groups = "rg_gmac_phy_intr_n_grp";
877 function = "rg_gmac_phy_intr_n";
878 };
879 };
880
881 rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
882 rg_rgmii_mac {
883 groups = "rg_rgmii_mac_grp";
884 function = "rg_rgmii_mac";
885 };
886 };
887
888 rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
889 rg_rgmii_phy_ref_clk_0 {
890 groups =
891 "rg_rgmii_phy_ref_clk_grp0";
892 function =
893 "rg_rgmii_phy_ref_clk_m0";
894 };
895 };
896
897 rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
898 rg_rgmii_phy_ref_clk_1 {
899 groups =
900 "rg_rgmii_phy_ref_clk_grp1";
901 function =
902 "rg_rgmii_phy_ref_clk_m1";
903 };
904 };
905
906 sd0_pmx: sd0@0 {
907 sd0 {
908 groups = "sd0_grp";
909 function = "sd0";
910 };
911 };
912
913 sd0_4bit_pmx: sd0_4bit@0 {
914 sd0_4bit {
915 groups = "sd0_4bit_grp";
916 function = "sd0_4bit";
917 };
918 };
919
920 sd1_pmx: sd1@0 {
921 sd1 {
922 groups = "sd1_grp";
923 function = "sd1";
924 };
925 };
926
927 sd1_4bit_pmx0: sd1_4bit@0 {
928 sd1_4bit_0 {
929 groups = "sd1_4bit_grp0";
930 function = "sd1_4bit_m0";
931 };
932 };
933
934 sd1_4bit_pmx1: sd1_4bit@1 {
935 sd1_4bit_1 {
936 groups = "sd1_4bit_grp1";
937 function = "sd1_4bit_m1";
938 };
939 };
940
941 sd2_pmx0: sd2@0 {
942 sd2_0 {
943 groups = "sd2_grp0";
944 function = "sd2_m0";
945 };
946 };
947
948 sd2_no_cdb_pmx0: sd2_no_cdb@0 {
949 sd2_no_cdb_0 {
950 groups = "sd2_no_cdb_grp0";
951 function = "sd2_no_cdb_m0";
952 };
953 };
954
955 sd3_pmx: sd3@0 {
956 sd3 {
957 groups = "sd3_grp";
958 function = "sd3";
959 };
960 };
961
962 sd5_pmx: sd5@0 {
963 sd5 {
964 groups = "sd5_grp";
965 function = "sd5";
966 };
967 };
968
969 sd6_pmx0: sd6@0 {
970 sd6_0 {
971 groups = "sd6_grp0";
972 function = "sd6_m0";
973 };
974 };
975
976 sd6_pmx1: sd6@1 {
977 sd6_1 {
978 groups = "sd6_grp1";
979 function = "sd6_m1";
980 };
981 };
982
983 sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
984 sp0_ext_ldo_on {
985 groups = "sp0_ext_ldo_on_grp";
986 function = "sp0_ext_ldo_on";
987 };
988 };
989
990 sp0_qspi_pmx: sp0_qspi@0 {
991 sp0_qspi {
992 groups = "sp0_qspi_grp";
993 function = "sp0_qspi";
994 };
995 };
996
997 sp1_spi_pmx: sp1_spi@0 {
998 sp1_spi {
999 groups = "sp1_spi_grp";
1000 function = "sp1_spi";
1001 };
1002 };
1003
1004 tpiu_trace_pmx: tpiu_trace@0 {
1005 tpiu_trace {
1006 groups = "tpiu_trace_grp";
1007 function = "tpiu_trace";
1008 };
1009 };
1010
1011 uart0_pmx: uart0@0 {
1012 uart0 {
1013 groups = "uart0_grp";
1014 function = "uart0";
1015 };
1016 };
1017
1018 uart0_nopause_pmx: uart0_nopause@0 {
1019 uart0_nopause {
1020 groups = "uart0_nopause_grp";
1021 function = "uart0_nopause";
1022 };
1023 };
1024
1025 uart1_pmx: uart1@0 {
1026 uart1 {
1027 groups = "uart1_grp";
1028 function = "uart1";
1029 };
1030 };
1031
1032 uart2_pmx: uart2@0 {
1033 uart2 {
1034 groups = "uart2_grp";
1035 function = "uart2";
1036 };
1037 };
1038
1039 uart3_pmx0: uart3@0 {
1040 uart3_0 {
1041 groups = "uart3_grp0";
1042 function = "uart3_m0";
1043 };
1044 };
1045
1046 uart3_pmx1: uart3@1 {
1047 uart3_1 {
1048 groups = "uart3_grp1";
1049 function = "uart3_m1";
1050 };
1051 };
1052
1053 uart3_pmx2: uart3@2 {
1054 uart3_2 {
1055 groups = "uart3_grp2";
1056 function = "uart3_m2";
1057 };
1058 };
1059
1060 uart3_pmx3: uart3@3 {
1061 uart3_3 {
1062 groups = "uart3_grp3";
1063 function = "uart3_m3";
1064 };
1065 };
1066
1067 uart3_nopause_pmx0: uart3_nopause@0 {
1068 uart3_nopause_0 {
1069 groups = "uart3_nopause_grp0";
1070 function = "uart3_nopause_m0";
1071 };
1072 };
1073
1074 uart3_nopause_pmx1: uart3_nopause@1 {
1075 uart3_nopause_1 {
1076 groups = "uart3_nopause_grp1";
1077 function = "uart3_nopause_m1";
1078 };
1079 };
1080
1081 uart4_pmx0: uart4@0 {
1082 uart4_0 {
1083 groups = "uart4_grp0";
1084 function = "uart4_m0";
1085 };
1086 };
1087
1088 uart4_pmx1: uart4@1 {
1089 uart4_1 {
1090 groups = "uart4_grp1";
1091 function = "uart4_m1";
1092 };
1093 };
1094
1095 uart4_pmx2: uart4@2 {
1096 uart4_2 {
1097 groups = "uart4_grp2";
1098 function = "uart4_m2";
1099 };
1100 };
1101
1102 uart4_nopause_pmx: uart4_nopause@0 {
1103 uart4_nopause {
1104 groups = "uart4_nopause_grp";
1105 function = "uart4_nopause";
1106 };
1107 };
1108
1109 usb0_drvvbus_pmx: usb0_drvvbus@0 {
1110 usb0_drvvbus {
1111 groups = "usb0_drvvbus_grp";
1112 function = "usb0_drvvbus";
1113 };
1114 };
1115
1116 usb1_drvvbus_pmx: usb1_drvvbus@0 {
1117 usb1_drvvbus {
1118 groups = "usb1_drvvbus_grp";
1119 function = "usb1_drvvbus";
1120 };
1121 };
1122
1123 visbus_dout_pmx: visbus_dout@0 {
1124 visbus_dout {
1125 groups = "visbus_dout_grp";
1126 function = "visbus_dout";
1127 };
1128 };
1129
1130 vi_vip1_pmx: vi_vip1@0 {
1131 vi_vip1 {
1132 groups = "vi_vip1_grp";
1133 function = "vi_vip1";
1134 };
1135 };
1136
1137 vi_vip1_ext_pmx: vi_vip1_ext@0 {
1138 vi_vip1_ext {
1139 groups = "vi_vip1_ext_grp";
1140 function = "vi_vip1_ext";
1141 };
1142 };
1143
1144 vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
1145 vi_vip1_low8bit {
1146 groups = "vi_vip1_low8bit_grp";
1147 function = "vi_vip1_low8bit";
1148 };
1149 };
1150
1151 vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
1152 vi_vip1_high8bit {
1153 groups = "vi_vip1_high8bit_grp";
1154 function = "vi_vip1_high8bit";
1155 };
1156 };
138 }; 1157 };
139 1158
140 pmipc { 1159 pmipc {
@@ -356,6 +1375,12 @@
356 clock-names = "gpio0_io"; 1375 clock-names = "gpio0_io";
357 gpio-controller; 1376 gpio-controller;
358 interrupt-controller; 1377 interrupt-controller;
1378
1379 gpio-banks = <2>;
1380 gpio-ranges = <&pinctrl 0 0 0>,
1381 <&pinctrl 32 0 0>;
1382 gpio-ranges-group-names = "lvds_gpio_grp",
1383 "uart_nand_gpio_grp";
359 }; 1384 };
360 1385
361 nand@17050000 { 1386 nand@17050000 {
@@ -461,11 +1486,22 @@
461 #interrupt-cells = <2>; 1486 #interrupt-cells = <2>;
462 compatible = "sirf,atlas7-gpio"; 1487 compatible = "sirf,atlas7-gpio";
463 reg = <0x13300000 0x1000>; 1488 reg = <0x13300000 0x1000>;
464 interrupts = <0 43 0>, <0 44 0>, <0 45 0>; 1489 interrupts = <0 43 0>, <0 44 0>,
1490 <0 45 0>, <0 46 0>;
465 clocks = <&car 84>; 1491 clocks = <&car 84>;
466 clock-names = "gpio1_io"; 1492 clock-names = "gpio1_io";
467 gpio-controller; 1493 gpio-controller;
468 interrupt-controller; 1494 interrupt-controller;
1495
1496 gpio-banks = <4>;
1497 gpio-ranges = <&pinctrl 0 0 0>,
1498 <&pinctrl 32 0 0>,
1499 <&pinctrl 64 0 0>,
1500 <&pinctrl 96 0 0>;
1501 gpio-ranges-group-names = "gnss_gpio_grp",
1502 "lcd_vip_gpio_grp",
1503 "sdio_i2s_gpio_grp",
1504 "sp_rgmii_gpio_grp";
469 }; 1505 };
470 1506
471 sd2: sdhci@14200000 { 1507 sd2: sdhci@14200000 {
@@ -744,6 +1780,10 @@
744 interrupts = <0 47 0>; 1780 interrupts = <0 47 0>;
745 gpio-controller; 1781 gpio-controller;
746 interrupt-controller; 1782 interrupt-controller;
1783
1784 gpio-banks = <1>;
1785 gpio-ranges = <&pinctrl 0 0 0>;
1786 gpio-ranges-group-names = "rtc_gpio_grp";
747 }; 1787 };
748 1788
749 rtc-iobg@18840000 { 1789 rtc-iobg@18840000 {
diff --git a/arch/arm/boot/dts/cros-ec-keyboard.dtsi b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
index 9c7fb0acae79..4e42f30cb318 100644
--- a/arch/arm/boot/dts/cros-ec-keyboard.dtsi
+++ b/arch/arm/boot/dts/cros-ec-keyboard.dtsi
@@ -22,6 +22,7 @@
22 MATRIX_KEY(0x00, 0x02, KEY_F1) 22 MATRIX_KEY(0x00, 0x02, KEY_F1)
23 MATRIX_KEY(0x00, 0x03, KEY_B) 23 MATRIX_KEY(0x00, 0x03, KEY_B)
24 MATRIX_KEY(0x00, 0x04, KEY_F10) 24 MATRIX_KEY(0x00, 0x04, KEY_F10)
25 MATRIX_KEY(0x00, 0x05, KEY_RO)
25 MATRIX_KEY(0x00, 0x06, KEY_N) 26 MATRIX_KEY(0x00, 0x06, KEY_N)
26 MATRIX_KEY(0x00, 0x08, KEY_EQUAL) 27 MATRIX_KEY(0x00, 0x08, KEY_EQUAL)
27 MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) 28 MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT)
@@ -34,6 +35,7 @@
34 MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) 35 MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE)
35 MATRIX_KEY(0x01, 0x09, KEY_F9) 36 MATRIX_KEY(0x01, 0x09, KEY_F9)
36 MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) 37 MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE)
38 MATRIX_KEY(0x01, 0x0c, KEY_HENKAN)
37 39
38 MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL) 40 MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL)
39 MATRIX_KEY(0x02, 0x01, KEY_TAB) 41 MATRIX_KEY(0x02, 0x01, KEY_TAB)
@@ -45,6 +47,7 @@
45 MATRIX_KEY(0x02, 0x07, KEY_102ND) 47 MATRIX_KEY(0x02, 0x07, KEY_102ND)
46 MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) 48 MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE)
47 MATRIX_KEY(0x02, 0x09, KEY_F8) 49 MATRIX_KEY(0x02, 0x09, KEY_F8)
50 MATRIX_KEY(0x02, 0x0a, KEY_YEN)
48 51
49 MATRIX_KEY(0x03, 0x01, KEY_GRAVE) 52 MATRIX_KEY(0x03, 0x01, KEY_GRAVE)
50 MATRIX_KEY(0x03, 0x02, KEY_F2) 53 MATRIX_KEY(0x03, 0x02, KEY_F2)
@@ -53,6 +56,7 @@
53 MATRIX_KEY(0x03, 0x06, KEY_6) 56 MATRIX_KEY(0x03, 0x06, KEY_6)
54 MATRIX_KEY(0x03, 0x08, KEY_MINUS) 57 MATRIX_KEY(0x03, 0x08, KEY_MINUS)
55 MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) 58 MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH)
59 MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN)
56 60
57 MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL) 61 MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL)
58 MATRIX_KEY(0x04, 0x01, KEY_A) 62 MATRIX_KEY(0x04, 0x01, KEY_A)
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index aa465904f6cc..096f68be99e2 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -686,7 +686,8 @@
686 686
687&dcan1 { 687&dcan1 {
688 status = "ok"; 688 status = "ok";
689 pinctrl-names = "default", "sleep"; 689 pinctrl-names = "default", "sleep", "active";
690 pinctrl-0 = <&dcan1_pins_default>; 690 pinctrl-0 = <&dcan1_pins_sleep>;
691 pinctrl-1 = <&dcan1_pins_sleep>; 691 pinctrl-1 = <&dcan1_pins_sleep>;
692 pinctrl-2 = <&dcan1_pins_default>;
692}; 693};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..1e29ccf77ea2 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
116 ranges = <0 0x2000 0x2000>; 116 ranges = <0 0x2000 0x2000>;
117 117
118 scm_conf: scm_conf@0 { 118 scm_conf: scm_conf@0 {
119 compatible = "syscon"; 119 compatible = "syscon", "simple-bus";
120 reg = <0x0 0x1400>; 120 reg = <0x0 0x1400>;
121 #address-cells = <1>; 121 #address-cells = <1>;
122 #size-cells = <1>; 122 #size-cells = <1>;
@@ -1140,6 +1140,7 @@
1140 ctrl-module = <&omap_control_sata>; 1140 ctrl-module = <&omap_control_sata>;
1141 clocks = <&sys_clkin1>, <&sata_ref_clk>; 1141 clocks = <&sys_clkin1>, <&sata_ref_clk>;
1142 clock-names = "sysclk", "refclk"; 1142 clock-names = "sysclk", "refclk";
1143 syscon-pllreset = <&scm_conf 0x3fc>;
1143 #phy-cells = <0>; 1144 #phy-cells = <0>;
1144 }; 1145 };
1145 1146
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 4e1b60581782..803738414086 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -587,9 +587,10 @@
587 587
588&dcan1 { 588&dcan1 {
589 status = "ok"; 589 status = "ok";
590 pinctrl-names = "default", "sleep"; 590 pinctrl-names = "default", "sleep", "active";
591 pinctrl-0 = <&dcan1_pins_default>; 591 pinctrl-0 = <&dcan1_pins_sleep>;
592 pinctrl-1 = <&dcan1_pins_sleep>; 592 pinctrl-1 = <&dcan1_pins_sleep>;
593 pinctrl-2 = <&dcan1_pins_default>;
593}; 594};
594 595
595&qspi { 596&qspi {
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
138 138
139 mipi_phy: video-phy@10020710 { 139 mipi_phy: video-phy@10020710 {
140 compatible = "samsung,s5pv210-mipi-video-phy"; 140 compatible = "samsung,s5pv210-mipi-video-phy";
141 reg = <0x10020710 8>;
142 #phy-cells = <1>; 141 #phy-cells = <1>;
142 syscon = <&pmu_system_controller>;
143 }; 143 };
144 144
145 pd_cam: cam-power-domain@10023C00 { 145 pd_cam: cam-power-domain@10023C00 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
127 }; 127 };
128}; 128};
129 129
130&cpu0 {
131 cpu0-supply = <&buck1_reg>;
132};
133
130&fimd { 134&fimd {
131 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; 135 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
132 pinctrl-names = "default"; 136 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
188 }; 188 };
189}; 189};
190 190
191&cpu0 {
192 cpu0-supply = <&varm_breg>;
193};
194
191&dsi_0 { 195&dsi_0 {
192 vddcore-supply = <&vusb_reg>; 196 vddcore-supply = <&vusb_reg>;
193 vddio-supply = <&vmipi_reg>; 197 vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
548 }; 548 };
549}; 549};
550 550
551&cpu0 {
552 cpu0-supply = <&vdd_arm_reg>;
553};
554
551&pinctrl_1 { 555&pinctrl_1 {
552 hdmi_hpd: hdmi-hpd { 556 hdmi_hpd: hdmi-hpd {
553 samsung,pins = "gpx3-7"; 557 samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
40 device_type = "cpu"; 40 device_type = "cpu";
41 compatible = "arm,cortex-a9"; 41 compatible = "arm,cortex-a9";
42 reg = <0x900>; 42 reg = <0x900>;
43 clocks = <&clock CLK_ARM_CLK>;
44 clock-names = "cpu";
45 clock-latency = <160000>;
46
47 operating-points = <
48 1200000 1250000
49 1000000 1150000
50 800000 1075000
51 500000 975000
52 400000 975000
53 200000 950000
54 >;
43 cooling-min-level = <4>; 55 cooling-min-level = <4>;
44 cooling-max-level = <2>; 56 cooling-max-level = <2>;
45 #cooling-cells = <2>; /* min followed by max */ 57 #cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index c892d58e8dad..b995333ea22b 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -468,6 +468,7 @@
468 interrupts = <36 37 38 39 40 41 42 43 44>; 468 interrupts = <36 37 38 39 40 41 42 43 44>;
469 status = "disabled"; 469 status = "disabled";
470 clocks = <&clks 26>; 470 clocks = <&clks 26>;
471 #io-channel-cells = <1>;
471 }; 472 };
472 473
473 spdif@80054000 { 474 spdif@80054000 {
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12/dts-v1/; 12/dts-v1/;
13#include <dt-bindings/gpio/gpio.h>
13#include <dt-bindings/input/input.h> 14#include <dt-bindings/input/input.h>
14#include "imx25.dtsi" 15#include "imx25.dtsi"
15 16
@@ -114,8 +115,8 @@
114&esdhc1 { 115&esdhc1 {
115 pinctrl-names = "default"; 116 pinctrl-names = "default";
116 pinctrl-0 = <&pinctrl_esdhc1>; 117 pinctrl-0 = <&pinctrl_esdhc1>;
117 cd-gpios = <&gpio2 1 0>; 118 cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
118 wp-gpios = <&gpio2 0 0>; 119 wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
119 status = "okay"; 120 status = "okay";
120}; 121};
121 122
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc215e4b75fd..b69be5c499cf 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -108,7 +108,7 @@
108 }; 108 };
109 109
110 gpt1: timer@10003000 { 110 gpt1: timer@10003000 {
111 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 111 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
112 reg = <0x10003000 0x1000>; 112 reg = <0x10003000 0x1000>;
113 interrupts = <26>; 113 interrupts = <26>;
114 clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>, 114 clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
@@ -117,7 +117,7 @@
117 }; 117 };
118 118
119 gpt2: timer@10004000 { 119 gpt2: timer@10004000 {
120 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 120 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
121 reg = <0x10004000 0x1000>; 121 reg = <0x10004000 0x1000>;
122 interrupts = <25>; 122 interrupts = <25>;
123 clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>, 123 clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>,
@@ -126,7 +126,7 @@
126 }; 126 };
127 127
128 gpt3: timer@10005000 { 128 gpt3: timer@10005000 {
129 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 129 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
130 reg = <0x10005000 0x1000>; 130 reg = <0x10005000 0x1000>;
131 interrupts = <24>; 131 interrupts = <24>;
132 clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>, 132 clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>,
@@ -376,7 +376,7 @@
376 }; 376 };
377 377
378 gpt4: timer@10019000 { 378 gpt4: timer@10019000 {
379 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 379 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
380 reg = <0x10019000 0x1000>; 380 reg = <0x10019000 0x1000>;
381 interrupts = <4>; 381 interrupts = <4>;
382 clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>, 382 clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>,
@@ -385,7 +385,7 @@
385 }; 385 };
386 386
387 gpt5: timer@1001a000 { 387 gpt5: timer@1001a000 {
388 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 388 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
389 reg = <0x1001a000 0x1000>; 389 reg = <0x1001a000 0x1000>;
390 interrupts = <3>; 390 interrupts = <3>;
391 clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>, 391 clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>,
@@ -436,7 +436,7 @@
436 }; 436 };
437 437
438 gpt6: timer@1001f000 { 438 gpt6: timer@1001f000 {
439 compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; 439 compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
440 reg = <0x1001f000 0x1000>; 440 reg = <0x1001f000 0x1000>;
441 interrupts = <2>; 441 interrupts = <2>;
442 clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>, 442 clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>,
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
286 can1: can@53fe4000 { 286 can1: can@53fe4000 {
287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
288 reg = <0x53fe4000 0x1000>; 288 reg = <0x53fe4000 0x1000>;
289 clocks = <&clks 33>; 289 clocks = <&clks 33>, <&clks 33>;
290 clock-names = "ipg"; 290 clock-names = "ipg", "per";
291 interrupts = <43>; 291 interrupts = <43>;
292 status = "disabled"; 292 status = "disabled";
293 }; 293 };
@@ -295,8 +295,8 @@
295 can2: can@53fe8000 { 295 can2: can@53fe8000 {
296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
297 reg = <0x53fe8000 0x1000>; 297 reg = <0x53fe8000 0x1000>;
298 clocks = <&clks 34>; 298 clocks = <&clks 34>, <&clks 34>;
299 clock-names = "ipg"; 299 clock-names = "ipg", "per";
300 interrupts = <44>; 300 interrupts = <44>;
301 status = "disabled"; 301 status = "disabled";
302 }; 302 };
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
98&esdhc1 { 98&esdhc1 {
99 pinctrl-names = "default"; 99 pinctrl-names = "default";
100 pinctrl-0 = <&pinctrl_esdhc1>; 100 pinctrl-0 = <&pinctrl_esdhc1>;
101 cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; 101 cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
102 bus-width = <4>; 102 bus-width = <4>;
103 status = "okay"; 103 status = "okay";
104}; 104};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
103&esdhc1 { 103&esdhc1 {
104 pinctrl-names = "default"; 104 pinctrl-names = "default";
105 pinctrl-0 = <&pinctrl_esdhc1>; 105 pinctrl-0 = <&pinctrl_esdhc1>;
106 cd-gpios = <&gpio1 1 0>; 106 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
107 wp-gpios = <&gpio1 9 0>; 107 wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
108 status = "okay"; 108 status = "okay";
109}; 109};
110 110
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
124&esdhc1 { 124&esdhc1 {
125 pinctrl-names = "default"; 125 pinctrl-names = "default";
126 pinctrl-0 = <&pinctrl_esdhc1>; 126 pinctrl-0 = <&pinctrl_esdhc1>;
127 cd-gpios = <&gpio1 1 0>; 127 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
128 wp-gpios = <&gpio1 9 0>; 128 wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
129 status = "okay"; 129 status = "okay";
130}; 130};
131 131
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 181ae5ebf23f..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
147&esdhc3 { 147&esdhc3 {
148 pinctrl-names = "default"; 148 pinctrl-names = "default";
149 pinctrl-0 = <&pinctrl_esdhc3>; 149 pinctrl-0 = <&pinctrl_esdhc3>;
150 cd-gpios = <&gpio3 11 0>; 150 cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
151 wp-gpios = <&gpio3 12 0>; 151 wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
152 bus-width = <8>; 152 bus-width = <8>;
153 status = "okay"; 153 status = "okay";
154}; 154};
@@ -295,9 +295,10 @@
295&tve { 295&tve {
296 pinctrl-names = "default"; 296 pinctrl-names = "default";
297 pinctrl-0 = <&pinctrl_vga_sync>; 297 pinctrl-0 = <&pinctrl_vga_sync>;
298 ddc-i2c-bus = <&i2c2>;
298 fsl,tve-mode = "vga"; 299 fsl,tve-mode = "vga";
299 fsl,hsync-pin = <4>; 300 fsl,hsync-pin = <7>; /* IPU DI1 PIN7 via EIM_OE */
300 fsl,vsync-pin = <6>; 301 fsl,vsync-pin = <8>; /* IPU DI1 PIN8 via EIM_RW */
301 status = "okay"; 302 status = "okay";
302}; 303};
303 304
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
41&esdhc1 { 41&esdhc1 {
42 pinctrl-names = "default"; 42 pinctrl-names = "default";
43 pinctrl-0 = <&pinctrl_esdhc1>; 43 pinctrl-0 = <&pinctrl_esdhc1>;
44 cd-gpios = <&gpio3 13 0>; 44 cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
45 wp-gpios = <&gpio4 11 0>; 45 wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
46 status = "okay"; 46 status = "okay";
47}; 47};
48 48
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
41 pinctrl-0 = <&pinctrl_esdhc2>, 41 pinctrl-0 = <&pinctrl_esdhc2>,
42 <&pinctrl_esdhc2_cdwp>; 42 <&pinctrl_esdhc2_cdwp>;
43 vmmc-supply = <&reg_3p3v>; 43 vmmc-supply = <&reg_3p3v>;
44 wp-gpios = <&gpio1 2 0>; 44 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
45 cd-gpios = <&gpio1 4 0>; 45 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
46 status = "disabled"; 46 status = "disabled";
47}; 47};
48 48
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
183}; 183};
184 184
185&esdhc1 { 185&esdhc1 {
186 cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; 186 cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
187 fsl,wp-controller; 187 fsl,wp-controller;
188 pinctrl-names = "default"; 188 pinctrl-names = "default";
189 pinctrl-0 = <&pinctrl_esdhc1>; 189 pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
191}; 191};
192 192
193&esdhc2 { 193&esdhc2 {
194 cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; 194 cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
195 fsl,wp-controller; 195 fsl,wp-controller;
196 pinctrl-names = "default"; 196 pinctrl-names = "default";
197 pinctrl-0 = <&pinctrl_esdhc2>; 197 pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
119&esdhc2 { 119&esdhc2 {
120 pinctrl-names = "default"; 120 pinctrl-names = "default";
121 pinctrl-0 = <&pinctrl_esdhc2>; 121 pinctrl-0 = <&pinctrl_esdhc2>;
122 cd-gpios = <&gpio3 25 0>; 122 cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
123 wp-gpios = <&gpio2 19 0>; 123 wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
124 vmmc-supply = <&reg_3p3v>; 124 vmmc-supply = <&reg_3p3v>;
125 status = "okay"; 125 status = "okay";
126}; 126};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
305&usdhc2 { 305&usdhc2 {
306 pinctrl-names = "default"; 306 pinctrl-names = "default";
307 pinctrl-0 = <&pinctrl_usdhc2>; 307 pinctrl-0 = <&pinctrl_usdhc2>;
308 cd-gpios = <&gpio1 4 0>; 308 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
309 wp-gpios = <&gpio1 2 0>; 309 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
310 vmmc-supply = <&reg_3p3v>; 310 vmmc-supply = <&reg_3p3v>;
311 status = "okay"; 311 status = "okay";
312}; 312};
@@ -314,8 +314,8 @@
314&usdhc3 { 314&usdhc3 {
315 pinctrl-names = "default"; 315 pinctrl-names = "default";
316 pinctrl-0 = <&pinctrl_usdhc3>; 316 pinctrl-0 = <&pinctrl_usdhc3>;
317 cd-gpios = <&gpio7 0 0>; 317 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
318 wp-gpios = <&gpio7 1 0>; 318 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
319 vmmc-supply = <&reg_3p3v>; 319 vmmc-supply = <&reg_3p3v>;
320 status = "okay"; 320 status = "okay";
321}; 321};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13/dts-v1/; 13/dts-v1/;
14#include <dt-bindings/gpio/gpio.h>
14#include "imx6q.dtsi" 15#include "imx6q.dtsi"
15 16
16/ { 17/ {
@@ -196,8 +197,8 @@
196}; 197};
197 198
198&usdhc3 { 199&usdhc3 {
199 cd-gpios = <&gpio6 11 0>; 200 cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
200 wp-gpios = <&gpio6 14 0>; 201 wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
201 vmmc-supply = <&reg_3p3v>; 202 vmmc-supply = <&reg_3p3v>;
202 pinctrl-names = "default"; 203 pinctrl-names = "default";
203 pinctrl-0 = <&pinctrl_usdhc3 204 pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9/dts-v1/; 9/dts-v1/;
10#include <dt-bindings/gpio/gpio.h>
10#include "imx6q.dtsi" 11#include "imx6q.dtsi"
11 12
12/ { 13/ {
@@ -161,7 +162,7 @@
161 pinctrl-names = "default"; 162 pinctrl-names = "default";
162 pinctrl-0 = <&pinctrl_usdhc3>; 163 pinctrl-0 = <&pinctrl_usdhc3>;
163 bus-width = <4>; 164 bus-width = <4>;
164 cd-gpios = <&gpio6 11 0>; 165 cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
165 vmmc-supply = <&reg_3p3v>; 166 vmmc-supply = <&reg_3p3v>;
166 status = "okay"; 167 status = "okay";
167}; 168};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
251 pinctrl-names = "default"; 251 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc2>; 252 pinctrl-0 = <&pinctrl_usdhc2>;
253 bus-width = <4>; 253 bus-width = <4>;
254 cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; 254 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
255 vmmc-supply = <&reg_3p3v>; 255 vmmc-supply = <&reg_3p3v>;
256 status = "okay"; 256 status = "okay";
257}; 257};
@@ -260,7 +260,7 @@
260 pinctrl-names = "default"; 260 pinctrl-names = "default";
261 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
262 bus-width = <4>; 262 bus-width = <4>;
263 cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; 263 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; 264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
265 vmmc-supply = <&reg_3p3v>; 265 vmmc-supply = <&reg_3p3v>;
266 status = "okay"; 266 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
173 pinctrl-names = "default"; 173 pinctrl-names = "default";
174 pinctrl-0 = <&pinctrl_usdhc1>; 174 pinctrl-0 = <&pinctrl_usdhc1>;
175 vmmc-supply = <&reg_3p3v>; 175 vmmc-supply = <&reg_3p3v>;
176 cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; 176 cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
177 status = "okay"; 177 status = "okay";
178}; 178};
179 179
@@ -181,7 +181,7 @@
181 pinctrl-names = "default"; 181 pinctrl-names = "default";
182 pinctrl-0 = <&pinctrl_usdhc2>; 182 pinctrl-0 = <&pinctrl_usdhc2>;
183 vmmc-supply = <&reg_3p3v>; 183 vmmc-supply = <&reg_3p3v>;
184 cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; 184 cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
185 status = "okay"; 185 status = "okay";
186}; 186};
187 187
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
392&usdhc1 { 392&usdhc1 {
393 pinctrl-names = "default"; 393 pinctrl-names = "default";
394 pinctrl-0 = <&pinctrl_usdhc1>; 394 pinctrl-0 = <&pinctrl_usdhc1>;
395 cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; 395 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
396 no-1-8-v; 396 no-1-8-v;
397 status = "okay"; 397 status = "okay";
398}; 398};
@@ -400,7 +400,7 @@
400&usdhc2 { 400&usdhc2 {
401 pinctrl-names = "default"; 401 pinctrl-names = "default";
402 pinctrl-0 = <&pinctrl_usdhc2>; 402 pinctrl-0 = <&pinctrl_usdhc2>;
403 cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; 403 cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
405 no-1-8-v; 405 no-1-8-v;
406 status = "okay"; 406 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
258 pinctrl-names = "default"; 258 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; 259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
260 vmmc-supply = <&reg_3p3v>; 260 vmmc-supply = <&reg_3p3v>;
261 cd-gpios = <&gpio1 4 0>; 261 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
262 status = "okay"; 262 status = "okay";
263}; 263};
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
1#include <dt-bindings/gpio/gpio.h>
2
1/ { 3/ {
2 regulators { 4 regulators {
3 compatible = "simple-bus"; 5 compatible = "simple-bus";
@@ -181,7 +183,7 @@
181&usdhc2 { /* module slot */ 183&usdhc2 { /* module slot */
182 pinctrl-names = "default"; 184 pinctrl-names = "default";
183 pinctrl-0 = <&pinctrl_usdhc2>; 185 pinctrl-0 = <&pinctrl_usdhc2>;
184 cd-gpios = <&gpio2 2 0>; 186 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
185 status = "okay"; 187 status = "okay";
186}; 188};
187 189
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
318&usdhc3 { 318&usdhc3 {
319 pinctrl-names = "default"; 319 pinctrl-names = "default";
320 pinctrl-0 = <&pinctrl_usdhc3>; 320 pinctrl-0 = <&pinctrl_usdhc3>;
321 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 321 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
322 vmmc-supply = <&reg_3p3v>; 322 vmmc-supply = <&reg_3p3v>;
323 status = "okay"; 323 status = "okay";
324}; 324};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
324&usdhc3 { 324&usdhc3 {
325 pinctrl-names = "default"; 325 pinctrl-names = "default";
326 pinctrl-0 = <&pinctrl_usdhc3>; 326 pinctrl-0 = <&pinctrl_usdhc3>;
327 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 327 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
328 vmmc-supply = <&reg_3p3v>; 328 vmmc-supply = <&reg_3p3v>;
329 status = "okay"; 329 status = "okay";
330}; 330};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
417&usdhc3 { 417&usdhc3 {
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3>; 419 pinctrl-0 = <&pinctrl_usdhc3>;
420 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 420 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
421 vmmc-supply = <&reg_3p3v>; 421 vmmc-supply = <&reg_3p3v>;
422 status = "okay"; 422 status = "okay";
423}; 423};
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
299 &pinctrl_hummingboard_usdhc2 299 &pinctrl_hummingboard_usdhc2
300 >; 300 >;
301 vmmc-supply = <&reg_3p3v>; 301 vmmc-supply = <&reg_3p3v>;
302 cd-gpios = <&gpio1 4 0>; 302 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
303 status = "okay"; 303 status = "okay";
304}; 304};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
453&usdhc3 { 453&usdhc3 {
454 pinctrl-names = "default"; 454 pinctrl-names = "default";
455 pinctrl-0 = <&pinctrl_usdhc3>; 455 pinctrl-0 = <&pinctrl_usdhc3>;
456 cd-gpios = <&gpio7 0 0>; 456 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
457 vmmc-supply = <&reg_3p3v>; 457 vmmc-supply = <&reg_3p3v>;
458 status = "okay"; 458 status = "okay";
459}; 459};
@@ -461,7 +461,7 @@
461&usdhc4 { 461&usdhc4 {
462 pinctrl-names = "default"; 462 pinctrl-names = "default";
463 pinctrl-0 = <&pinctrl_usdhc4>; 463 pinctrl-0 = <&pinctrl_usdhc4>;
464 cd-gpios = <&gpio2 6 0>; 464 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
465 vmmc-supply = <&reg_3p3v>; 465 vmmc-supply = <&reg_3p3v>;
466 status = "okay"; 466 status = "okay";
467}; 467};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
409&usdhc2 { 409&usdhc2 {
410 pinctrl-names = "default"; 410 pinctrl-names = "default";
411 pinctrl-0 = <&pinctrl_usdhc2>; 411 pinctrl-0 = <&pinctrl_usdhc2>;
412 cd-gpios = <&gpio1 4 0>; 412 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
413 wp-gpios = <&gpio1 2 0>; 413 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
414 status = "disabled"; 414 status = "disabled";
415}; 415};
416 416
@@ -418,7 +418,7 @@
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3 419 pinctrl-0 = <&pinctrl_usdhc3
420 &pinctrl_usdhc3_cdwp>; 420 &pinctrl_usdhc3_cdwp>;
421 cd-gpios = <&gpio1 27 0>; 421 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
422 wp-gpios = <&gpio1 29 0>; 422 wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
423 status = "disabled"; 423 status = "disabled";
424}; 424};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
342 pinctrl-0 = <&pinctrl_usdhc2>; 342 pinctrl-0 = <&pinctrl_usdhc2>;
343 bus-width = <4>; 343 bus-width = <4>;
344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; 345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
346 status = "okay"; 346 status = "okay";
347}; 347};
348 348
@@ -351,6 +351,6 @@
351 pinctrl-0 = <&pinctrl_usdhc3>; 351 pinctrl-0 = <&pinctrl_usdhc3>;
352 bus-width = <4>; 352 bus-width = <4>;
353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; 354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
355 status = "okay"; 355 status = "okay";
356}; 356};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
467 pinctrl-0 = <&pinctrl_usdhc3>; 467 pinctrl-0 = <&pinctrl_usdhc3>;
468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
470 cd-gpios = <&gpio6 15 0>; 470 cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
471 wp-gpios = <&gpio1 13 0>; 471 wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
472 status = "okay"; 472 status = "okay";
473}; 473};
474 474
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
448&usdhc3 { 448&usdhc3 {
449 pinctrl-names = "default"; 449 pinctrl-names = "default";
450 pinctrl-0 = <&pinctrl_usdhc3>; 450 pinctrl-0 = <&pinctrl_usdhc3>;
451 cd-gpios = <&gpio7 0 0>; 451 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
452 wp-gpios = <&gpio7 1 0>; 452 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
453 vmmc-supply = <&reg_3p3v>; 453 vmmc-supply = <&reg_3p3v>;
454 status = "okay"; 454 status = "okay";
455}; 455};
@@ -457,7 +457,7 @@
457&usdhc4 { 457&usdhc4 {
458 pinctrl-names = "default"; 458 pinctrl-names = "default";
459 pinctrl-0 = <&pinctrl_usdhc4>; 459 pinctrl-0 = <&pinctrl_usdhc4>;
460 cd-gpios = <&gpio2 6 0>; 460 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
461 vmmc-supply = <&reg_3p3v>; 461 vmmc-supply = <&reg_3p3v>;
462 status = "okay"; 462 status = "okay";
463}; 463};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
562 pinctrl-names = "default"; 562 pinctrl-names = "default";
563 pinctrl-0 = <&pinctrl_usdhc2>; 563 pinctrl-0 = <&pinctrl_usdhc2>;
564 bus-width = <8>; 564 bus-width = <8>;
565 cd-gpios = <&gpio2 2 0>; 565 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
566 wp-gpios = <&gpio2 3 0>; 566 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
567 status = "okay"; 567 status = "okay";
568}; 568};
569 569
@@ -571,8 +571,8 @@
571 pinctrl-names = "default"; 571 pinctrl-names = "default";
572 pinctrl-0 = <&pinctrl_usdhc3>; 572 pinctrl-0 = <&pinctrl_usdhc3>;
573 bus-width = <8>; 573 bus-width = <8>;
574 cd-gpios = <&gpio2 0 0>; 574 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
575 wp-gpios = <&gpio2 1 0>; 575 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
576 status = "okay"; 576 status = "okay";
577}; 577};
578 578
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
680 pinctrl-0 = <&pinctrl_usdhc1>; 680 pinctrl-0 = <&pinctrl_usdhc1>;
681 bus-width = <4>; 681 bus-width = <4>;
682 no-1-8-v; 682 no-1-8-v;
683 cd-gpios = <&gpio7 2 0>; 683 cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
684 fsl,wp-controller; 684 fsl,wp-controller;
685 status = "okay"; 685 status = "okay";
686}; 686};
@@ -690,7 +690,7 @@
690 pinctrl-0 = <&pinctrl_usdhc2>; 690 pinctrl-0 = <&pinctrl_usdhc2>;
691 bus-width = <4>; 691 bus-width = <4>;
692 no-1-8-v; 692 no-1-8-v;
693 cd-gpios = <&gpio7 3 0>; 693 cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
694 fsl,wp-controller; 694 fsl,wp-controller;
695 status = "okay"; 695 status = "okay";
696}; 696};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#include <dt-bindings/gpio/gpio.h>
13
12/ { 14/ {
13 regulators { 15 regulators {
14 compatible = "simple-bus"; 16 compatible = "simple-bus";
@@ -250,13 +252,13 @@
250&usdhc1 { 252&usdhc1 {
251 pinctrl-names = "default"; 253 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc1>; 254 pinctrl-0 = <&pinctrl_usdhc1>;
253 cd-gpios = <&gpio1 2 0>; 255 cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
254 status = "okay"; 256 status = "okay";
255}; 257};
256 258
257&usdhc3 { 259&usdhc3 {
258 pinctrl-names = "default"; 260 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
260 cd-gpios = <&gpio3 9 0>; 262 cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
261 status = "okay"; 263 status = "okay";
262}; 264};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d13592080d..b57033e8c633 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
181 interrupt-names = "msi"; 181 interrupt-names = "msi";
182 #interrupt-cells = <1>; 182 #interrupt-cells = <1>;
183 interrupt-map-mask = <0 0 0 0x7>; 183 interrupt-map-mask = <0 0 0 0x7>;
184 interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 184 interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
185 <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, 185 <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
186 <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, 186 <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
187 <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; 187 <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
188 clocks = <&clks IMX6QDL_CLK_PCIE_AXI>, 188 clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
189 <&clks IMX6QDL_CLK_LVDS1_GATE>, 189 <&clks IMX6QDL_CLK_LVDS1_GATE>,
190 <&clks IMX6QDL_CLK_PCIE_REF_125M>; 190 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>; 617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>; 618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
619 bus-width = <8>; 619 bus-width = <8>;
620 cd-gpios = <&gpio4 7 0>; 620 cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
621 wp-gpios = <&gpio4 6 0>; 621 wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
622 status = "okay"; 622 status = "okay";
623}; 623};
624 624
@@ -627,8 +627,8 @@
627 pinctrl-0 = <&pinctrl_usdhc2>; 627 pinctrl-0 = <&pinctrl_usdhc2>;
628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>; 628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>; 629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
630 cd-gpios = <&gpio5 0 0>; 630 cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
631 wp-gpios = <&gpio4 29 0>; 631 wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
632 status = "okay"; 632 status = "okay";
633}; 633};
634 634
@@ -637,6 +637,6 @@
637 pinctrl-0 = <&pinctrl_usdhc3>; 637 pinctrl-0 = <&pinctrl_usdhc3>;
638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
640 cd-gpios = <&gpio3 22 0>; 640 cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
641 status = "okay"; 641 status = "okay";
642}; 642};
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
51 bus-width = <8>; 51 bus-width = <8>;
52 cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>; 52 cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; 53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
54 keep-power-in-suspend; 54 keep-power-in-suspend;
55 enable-sdio-wakeup; 55 enable-sdio-wakeup;
@@ -61,7 +61,7 @@
61 pinctrl-names = "default"; 61 pinctrl-names = "default";
62 pinctrl-0 = <&pinctrl_usdhc4>; 62 pinctrl-0 = <&pinctrl_usdhc4>;
63 bus-width = <8>; 63 bus-width = <8>;
64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; 64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
65 no-1-8-v; 65 no-1-8-v;
66 keep-power-in-suspend; 66 keep-power-in-suspend;
67 enable-sdio-wakup; 67 enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
295 bus-width = <8>; 295 bus-width = <8>;
296 cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 296 cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; 297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
298 keep-power-in-suspend; 298 keep-power-in-suspend;
299 enable-sdio-wakeup; 299 enable-sdio-wakeup;
@@ -304,7 +304,7 @@
304&usdhc4 { 304&usdhc4 {
305 pinctrl-names = "default"; 305 pinctrl-names = "default";
306 pinctrl-0 = <&pinctrl_usdhc4>; 306 pinctrl-0 = <&pinctrl_usdhc4>;
307 cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>; 307 cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>; 308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
309 status = "okay"; 309 status = "okay";
310}; 310};
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
234&usdhc1 { 234&usdhc1 {
235 pinctrl-names = "default"; 235 pinctrl-names = "default";
236 pinctrl-0 = <&pinctrl_usdhc1>; 236 pinctrl-0 = <&pinctrl_usdhc1>;
237 cd-gpios = <&gpio5 0 0>; 237 cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
238 wp-gpios = <&gpio5 1 0>; 238 wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
239 enable-sdio-wakeup; 239 enable-sdio-wakeup;
240 keep-power-in-suspend; 240 keep-power-in-suspend;
241 status = "okay"; 241 status = "okay";
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 compatible = "ti,keystone,main-pll-clock"; 14 compatible = "ti,keystone,main-pll-clock";
15 clocks = <&refclksys>; 15 clocks = <&refclksys>;
16 reg = <0x02620350 4>, <0x02310110 4>; 16 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
17 reg-names = "control", "multiplier"; 17 reg-names = "control", "multiplier", "post-divider";
18 fixed-postdiv = <2>;
19 }; 18 };
20 19
21 papllclk: papllclk@2620358 { 20 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 50e555eab50d..675fb8e492c6 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -86,7 +86,7 @@
86 gpio,syscon-dev = <&devctrl 0x240>; 86 gpio,syscon-dev = <&devctrl 0x240>;
87 }; 87 };
88 88
89 pcie@21020000 { 89 pcie1: pcie@21020000 {
90 compatible = "ti,keystone-pcie","snps,dw-pcie"; 90 compatible = "ti,keystone-pcie","snps,dw-pcie";
91 clocks = <&clkpcie1>; 91 clocks = <&clkpcie1>;
92 clock-names = "pcie"; 92 clock-names = "pcie";
@@ -96,6 +96,7 @@
96 ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000 96 ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
97 0x82000000 0 0x60000000 0x60000000 0 0x10000000>; 97 0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
98 98
99 status = "disabled";
99 device_type = "pci"; 100 device_type = "pci";
100 num-lanes = <2>; 101 num-lanes = <2>;
101 102
@@ -130,10 +131,17 @@
130 <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>; 131 <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
131 }; 132 };
132 }; 133 };
134
135 mdio: mdio@24200f00 {
136 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
137 #address-cells = <1>;
138 #size-cells = <0>;
139 reg = <0x24200f00 0x100>;
140 status = "disabled";
141 clocks = <&clkcpgmac>;
142 clock-names = "fck";
143 bus_freq = <2500000>;
144 };
133 /include/ "k2e-netcp.dtsi" 145 /include/ "k2e-netcp.dtsi"
134 }; 146 };
135}; 147};
136
137&mdio {
138 reg = <0x24200f00 0x100>;
139};
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae6472407b22..d0810a5f2968 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
98 #gpio-cells = <2>; 98 #gpio-cells = <2>;
99 gpio,syscon-dev = <&devctrl 0x25c>; 99 gpio,syscon-dev = <&devctrl 0x25c>;
100 }; 100 };
101
102 mdio: mdio@02090300 {
103 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
104 #address-cells = <1>;
105 #size-cells = <0>;
106 reg = <0x02090300 0x100>;
107 status = "disabled";
108 clocks = <&clkcpgmac>;
109 clock-names = "fck";
110 bus_freq = <2500000>;
111 };
101 /include/ "k2hk-netcp.dtsi" 112 /include/ "k2hk-netcp.dtsi"
102 }; 113 };
103}; 114};
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e007483615e..49fd414f680c 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
29 }; 29 };
30 30
31 soc { 31 soc {
32
33 /include/ "k2l-clocks.dtsi" 32 /include/ "k2l-clocks.dtsi"
34 33
35 uart2: serial@02348400 { 34 uart2: serial@02348400 {
@@ -79,6 +78,17 @@
79 #gpio-cells = <2>; 78 #gpio-cells = <2>;
80 gpio,syscon-dev = <&devctrl 0x24c>; 79 gpio,syscon-dev = <&devctrl 0x24c>;
81 }; 80 };
81
82 mdio: mdio@26200f00 {
83 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
84 #address-cells = <1>;
85 #size-cells = <0>;
86 reg = <0x26200f00 0x100>;
87 status = "disabled";
88 clocks = <&clkcpgmac>;
89 clock-names = "fck";
90 bus_freq = <2500000>;
91 };
82 /include/ "k2l-netcp.dtsi" 92 /include/ "k2l-netcp.dtsi"
83 }; 93 };
84}; 94};
@@ -96,7 +106,3 @@
96 /* Pin muxed. Enabled and configured by Bootloader */ 106 /* Pin muxed. Enabled and configured by Bootloader */
97 status = "disabled"; 107 status = "disabled";
98}; 108};
99
100&mdio {
101 reg = <0x26200f00 0x100>;
102};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index c06542b2c954..72816d65f7ec 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
267 1 0 0x21000A00 0x00000100>; 267 1 0 0x21000A00 0x00000100>;
268 }; 268 };
269 269
270 mdio: mdio@02090300 {
271 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
272 #address-cells = <1>;
273 #size-cells = <0>;
274 reg = <0x02090300 0x100>;
275 status = "disabled";
276 clocks = <&clkpa>;
277 clock-names = "fck";
278 bus_freq = <2500000>;
279 };
280
281 kirq0: keystone_irq@26202a0 { 270 kirq0: keystone_irq@26202a0 {
282 compatible = "ti,keystone-irq"; 271 compatible = "ti,keystone-irq";
283 interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>; 272 interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
@@ -286,7 +275,7 @@
286 ti,syscon-dev = <&devctrl 0x2a0>; 275 ti,syscon-dev = <&devctrl 0x2a0>;
287 }; 276 };
288 277
289 pcie@21800000 { 278 pcie0: pcie@21800000 {
290 compatible = "ti,keystone-pcie", "snps,dw-pcie"; 279 compatible = "ti,keystone-pcie", "snps,dw-pcie";
291 clocks = <&clkpcie>; 280 clocks = <&clkpcie>;
292 clock-names = "pcie"; 281 clock-names = "pcie";
@@ -296,6 +285,7 @@
296 ranges = <0x81000000 0 0 0x23250000 0 0x4000 285 ranges = <0x81000000 0 0 0x23250000 0 0x4000
297 0x82000000 0 0x50000000 0x50000000 0 0x10000000>; 286 0x82000000 0 0x50000000 0x50000000 0 0x10000000>;
298 287
288 status = "disabled";
299 device_type = "pci"; 289 device_type = "pci";
300 num-lanes = <2>; 290 num-lanes = <2>;
301 291
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963be003..2390f387c271 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
51 }; 51 };
52 52
53 scm_conf: scm_conf@270 { 53 scm_conf: scm_conf@270 {
54 compatible = "syscon"; 54 compatible = "syscon",
55 "simple-bus";
55 reg = <0x270 0x240>; 56 reg = <0x270 0x240>;
56 #address-cells = <1>; 57 #address-cells = <1>;
57 #size-cells = <1>; 58 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index 233c69e50ae3..df8908adb0cb 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -120,7 +120,7 @@
120 120
121 lcd0: display@0 { 121 lcd0: display@0 {
122 compatible = "lgphilips,lb035q02"; 122 compatible = "lgphilips,lb035q02";
123 label = "lcd"; 123 label = "lcd35";
124 124
125 reg = <1>; /* CS1 */ 125 reg = <1>; /* CS1 */
126 spi-max-frequency = <10000000>; 126 spi-max-frequency = <10000000>;
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
index f5395b7da912..048fd216970a 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
@@ -98,7 +98,7 @@
98 98
99 lcd0: display@0 { 99 lcd0: display@0 {
100 compatible = "samsung,lte430wq-f0c", "panel-dpi"; 100 compatible = "samsung,lte430wq-f0c", "panel-dpi";
101 label = "lcd"; 101 label = "lcd43";
102 102
103 pinctrl-names = "default"; 103 pinctrl-names = "default";
104 pinctrl-0 = <&lte430_pins>; 104 pinctrl-0 = <&lte430_pins>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index f884d6adb71e..abc4473e6f8a 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
191 }; 191 };
192 192
193 omap4_padconf_global: omap4_padconf_global@5a0 { 193 omap4_padconf_global: omap4_padconf_global@5a0 {
194 compatible = "syscon"; 194 compatible = "syscon",
195 "simple-bus";
195 reg = <0x5a0 0x170>; 196 reg = <0x5a0 0x170>;
196 #address-cells = <1>; 197 #address-cells = <1>;
197 #size-cells = <1>; 198 #size-cells = <1>;
@@ -551,6 +552,7 @@
551 reg = <0x4a066000 0x100>; 552 reg = <0x4a066000 0x100>;
552 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 553 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
553 ti,hwmods = "mmu_dsp"; 554 ti,hwmods = "mmu_dsp";
555 #iommu-cells = <0>;
554 }; 556 };
555 557
556 mmu_ipu: mmu@55082000 { 558 mmu_ipu: mmu@55082000 {
@@ -558,6 +560,7 @@
558 reg = <0x55082000 0x100>; 560 reg = <0x55082000 0x100>;
559 interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; 561 interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
560 ti,hwmods = "mmu_ipu"; 562 ti,hwmods = "mmu_ipu";
563 #iommu-cells = <0>;
561 ti,iommu-bus-err-back; 564 ti,iommu-bus-err-back;
562 }; 565 };
563 566
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7d24ae0306b5..b1a1263e6001 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
180 }; 180 };
181 181
182 omap5_padconf_global: omap5_padconf_global@5a0 { 182 omap5_padconf_global: omap5_padconf_global@5a0 {
183 compatible = "syscon"; 183 compatible = "syscon",
184 "simple-bus";
184 reg = <0x5a0 0xec>; 185 reg = <0x5a0 0xec>;
185 #address-cells = <1>; 186 #address-cells = <1>;
186 #size-cells = <1>; 187 #size-cells = <1>;
@@ -612,6 +613,7 @@
612 reg = <0x4a066000 0x100>; 613 reg = <0x4a066000 0x100>;
613 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 614 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
614 ti,hwmods = "mmu_dsp"; 615 ti,hwmods = "mmu_dsp";
616 #iommu-cells = <0>;
615 }; 617 };
616 618
617 mmu_ipu: mmu@55082000 { 619 mmu_ipu: mmu@55082000 {
@@ -619,6 +621,7 @@
619 reg = <0x55082000 0x100>; 621 reg = <0x55082000 0x100>;
620 interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>; 622 interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
621 ti,hwmods = "mmu_ipu"; 623 ti,hwmods = "mmu_ipu";
624 #iommu-cells = <0>;
622 ti,iommu-bus-err-back; 625 ti,iommu-bus-err-back;
623 }; 626 };
624 627
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index 71468a7eb28f..5e17fd147728 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -60,27 +60,27 @@
60 rxc-skew-ps = <2000>; 60 rxc-skew-ps = <2000>;
61}; 61};
62 62
63&mmc0 {
64 vmmc-supply = <&regulator_3_3v>;
65 vqmmc-supply = <&regulator_3_3v>;
66};
67
68&usb1 {
69 status = "okay";
70};
71
72&gpio2 { 63&gpio2 {
73 status = "okay"; 64 status = "okay";
74}; 65};
75 66
76&i2c1{ 67&i2c1 {
77 status = "okay"; 68 status = "okay";
78 69
79 accel1: accel1@53{ 70 accel1: accelerometer@53 {
80 compatible = "adxl34x"; 71 compatible = "adi,adxl345";
81 reg = <0x53>; 72 reg = <0x53>;
82 73
83 interrupt-parent = < &portc >; 74 interrupt-parent = <&portc>;
84 interrupts = <3 2>; 75 interrupts = <3 2>;
85 }; 76 };
86}; 77};
78
79&mmc0 {
80 vmmc-supply = <&regulator_3_3v>;
81 vqmmc-supply = <&regulator_3_3v>;
82};
83
84&usb1 {
85 status = "okay";
86};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index d42c84b1df8d..e48857249ce7 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr1310 Evaluation Board 2 * DTS file for SPEAr1310 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 9d342920695a..54bc6d3cf290 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr1310 SoCs 2 * DTS file for all SPEAr1310 SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index b23e05ed1d60..c611f5606dfe 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr1340 Evaluation Board 2 * DTS file for SPEAr1340 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index 13e1aa33daa2..df2232d767ed 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr1340 SoCs 2 * DTS file for all SPEAr1340 SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 40accc87e3a2..14594ce8c18a 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr13xx SoCs 2 * DTS file for all SPEAr13xx SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 5de1431653e4..e859e8288bcd 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr300 Evaluation Board 2 * DTS file for SPEAr300 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index f79b3dfaabe6..f4e92e599729 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr300 SoC 2 * DTS file for SPEAr300 SoC
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index b09632963d15..070f2c1b7851 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr310 Evaluation Board 2 * DTS file for SPEAr310 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index 95372080eea6..da210b454753 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr310 SoC 2 * DTS file for SPEAr310 SoC
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index fdedbb514102..1b1034477923 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr320 Evaluation Board 2 * DTS file for SPEAr320 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index ffea342aeec9..22be6e5edaac 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr320 SoC 2 * DTS file for SPEAr320 SoC
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index f0e3fcf8e323..118135d75899 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr3xx SoCs 2 * DTS file for all SPEAr3xx SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com> 4 * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
index 32dd55e5f4e6..6eaaf638e52e 100644
--- a/arch/arm/boot/dts/ste-ccu8540.dts
+++ b/arch/arm/boot/dts/ste-ccu8540.dts
@@ -17,6 +17,13 @@
17 model = "ST-Ericsson U8540 platform with Device Tree"; 17 model = "ST-Ericsson U8540 platform with Device Tree";
18 compatible = "st-ericsson,ccu8540", "st-ericsson,u8540"; 18 compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
19 19
 20	/* This stabilizes the serial port enumeration */
21 aliases {
22 serial0 = &ux500_serial0;
23 serial1 = &ux500_serial1;
24 serial2 = &ux500_serial2;
25 };
26
20 memory@0 { 27 memory@0 {
21 device_type = "memory"; 28 device_type = "memory";
22 reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>; 29 reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
diff --git a/arch/arm/boot/dts/ste-ccu9540.dts b/arch/arm/boot/dts/ste-ccu9540.dts
index 651c56d400a4..c8b815819cfe 100644
--- a/arch/arm/boot/dts/ste-ccu9540.dts
+++ b/arch/arm/boot/dts/ste-ccu9540.dts
@@ -16,6 +16,13 @@
16 model = "ST-Ericsson CCU9540 platform with Device Tree"; 16 model = "ST-Ericsson CCU9540 platform with Device Tree";
17 compatible = "st-ericsson,ccu9540", "st-ericsson,u9540"; 17 compatible = "st-ericsson,ccu9540", "st-ericsson,u9540";
18 18
 19	/* This stabilizes the serial port enumeration */
20 aliases {
21 serial0 = &ux500_serial0;
22 serial1 = &ux500_serial1;
23 serial2 = &ux500_serial2;
24 };
25
19 memory { 26 memory {
20 reg = <0x00000000 0x20000000>; 27 reg = <0x00000000 0x20000000>;
21 }; 28 };
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 853684ad7773..b8f81fb418ce 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
15#include "skeleton.dtsi" 15#include "skeleton.dtsi"
16 16
17/ { 17/ {
18 cpus {
19 #address-cells = <1>;
20 #size-cells = <0>;
21 enable-method = "ste,dbx500-smp";
22
23 cpu-map {
24 cluster0 {
25 core0 {
26 cpu = <&CPU0>;
27 };
28 core1 {
29 cpu = <&CPU1>;
30 };
31 };
32 };
33 CPU0: cpu@300 {
34 device_type = "cpu";
35 compatible = "arm,cortex-a9";
36 reg = <0x300>;
37 };
38 CPU1: cpu@301 {
39 device_type = "cpu";
40 compatible = "arm,cortex-a9";
41 reg = <0x301>;
42 };
43 };
44
18 soc { 45 soc {
19 #address-cells = <1>; 46 #address-cells = <1>;
20 #size-cells = <1>; 47 #size-cells = <1>;
@@ -22,32 +49,6 @@
22 interrupt-parent = <&intc>; 49 interrupt-parent = <&intc>;
23 ranges; 50 ranges;
24 51
25 cpus {
26 #address-cells = <1>;
27 #size-cells = <0>;
28
29 cpu-map {
30 cluster0 {
31 core0 {
32 cpu = <&CPU0>;
33 };
34 core1 {
35 cpu = <&CPU1>;
36 };
37 };
38 };
39 CPU0: cpu@0 {
40 device_type = "cpu";
41 compatible = "arm,cortex-a9";
42 reg = <0>;
43 };
44 CPU1: cpu@1 {
45 device_type = "cpu";
46 compatible = "arm,cortex-a9";
47 reg = <1>;
48 };
49 };
50
51 ptm@801ae000 { 52 ptm@801ae000 {
52 compatible = "arm,coresight-etm3x", "arm,primecell"; 53 compatible = "arm,coresight-etm3x", "arm,primecell";
53 reg = <0x801ae000 0x1000>; 54 reg = <0x801ae000 0x1000>;
@@ -971,7 +972,7 @@
971 power-domains = <&pm_domains DOMAIN_VAPE>; 972 power-domains = <&pm_domains DOMAIN_VAPE>;
972 }; 973 };
973 974
974 uart@80120000 { 975 ux500_serial0: uart@80120000 {
975 compatible = "arm,pl011", "arm,primecell"; 976 compatible = "arm,pl011", "arm,primecell";
976 reg = <0x80120000 0x1000>; 977 reg = <0x80120000 0x1000>;
977 interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>; 978 interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
@@ -986,7 +987,7 @@
986 status = "disabled"; 987 status = "disabled";
987 }; 988 };
988 989
989 uart@80121000 { 990 ux500_serial1: uart@80121000 {
990 compatible = "arm,pl011", "arm,primecell"; 991 compatible = "arm,pl011", "arm,primecell";
991 reg = <0x80121000 0x1000>; 992 reg = <0x80121000 0x1000>;
992 interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>; 993 interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
@@ -1001,7 +1002,7 @@
1001 status = "disabled"; 1002 status = "disabled";
1002 }; 1003 };
1003 1004
1004 uart@80007000 { 1005 ux500_serial2: uart@80007000 {
1005 compatible = "arm,pl011", "arm,primecell"; 1006 compatible = "arm,pl011", "arm,primecell";
1006 reg = <0x80007000 0x1000>; 1007 reg = <0x80007000 0x1000>;
1007 interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>; 1008 interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 744c1e3a744d..6d8ce154347e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -32,11 +32,11 @@
32 status = "okay"; 32 status = "okay";
33 }; 33 };
34 34
35 /* This UART is unused and thus left disabled */
35 uart@80121000 { 36 uart@80121000 {
36 pinctrl-names = "default", "sleep"; 37 pinctrl-names = "default", "sleep";
37 pinctrl-0 = <&uart1_default_mode>; 38 pinctrl-0 = <&uart1_default_mode>;
38 pinctrl-1 = <&uart1_sleep_mode>; 39 pinctrl-1 = <&uart1_sleep_mode>;
39 status = "okay";
40 }; 40 };
41 41
42 uart@80007000 { 42 uart@80007000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
index 2b1cb5b584b6..18e9795a94f9 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-stuib.dts
@@ -17,6 +17,13 @@
17 model = "ST-Ericsson HREF (pre-v60) and ST UIB"; 17 model = "ST-Ericsson HREF (pre-v60) and ST UIB";
18 compatible = "st-ericsson,mop500", "st-ericsson,u8500"; 18 compatible = "st-ericsson,mop500", "st-ericsson,u8500";
19 19
 20	/* This stabilizes the serial port enumeration */
21 aliases {
22 serial0 = &ux500_serial0;
23 serial1 = &ux500_serial1;
24 serial2 = &ux500_serial2;
25 };
26
20 soc { 27 soc {
21 /* Reset line for the BU21013 touchscreen */ 28 /* Reset line for the BU21013 touchscreen */
22 i2c@80110000 { 29 i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
index 59523f866812..24739914e689 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -16,4 +16,11 @@
16/ { 16/ {
17 model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB"; 17 model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
18 compatible = "st-ericsson,mop500", "st-ericsson,u8500"; 18 compatible = "st-ericsson,mop500", "st-ericsson,u8500";
19
 20	/* This stabilizes the serial port enumeration */
21 aliases {
22 serial0 = &ux500_serial0;
23 serial1 = &ux500_serial1;
24 serial2 = &ux500_serial2;
25 };
19}; 26};
diff --git a/arch/arm/boot/dts/ste-hrefprev60.dtsi b/arch/arm/boot/dts/ste-hrefprev60.dtsi
index 7f3975b58d16..b0278f4c486c 100644
--- a/arch/arm/boot/dts/ste-hrefprev60.dtsi
+++ b/arch/arm/boot/dts/ste-hrefprev60.dtsi
@@ -23,6 +23,11 @@
23 }; 23 };
24 24
25 soc { 25 soc {
26 /* Enable UART1 on this board */
27 uart@80121000 {
28 status = "okay";
29 };
30
26 i2c@80004000 { 31 i2c@80004000 {
27 tps61052@33 { 32 tps61052@33 {
28 compatible = "tps61052"; 33 compatible = "tps61052";
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
index 8c6a2de56cf1..c2e1ba019a2f 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
@@ -19,6 +19,13 @@
19 model = "ST-Ericsson HREF (v60+) and ST UIB"; 19 model = "ST-Ericsson HREF (v60+) and ST UIB";
20 compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500"; 20 compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
21 21
 22	/* This stabilizes the serial port enumeration */
23 aliases {
24 serial0 = &ux500_serial0;
25 serial1 = &ux500_serial1;
26 serial2 = &ux500_serial2;
27 };
28
22 soc { 29 soc {
23 /* Reset line for the BU21013 touchscreen */ 30 /* Reset line for the BU21013 touchscreen */
24 i2c@80110000 { 31 i2c@80110000 {
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
index d53cccdce776..ebd8547e98f1 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -18,4 +18,11 @@
18/ { 18/ {
19 model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB"; 19 model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
20 compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500"; 20 compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
21
 22	/* This stabilizes the serial port enumeration */
23 aliases {
24 serial0 = &ux500_serial0;
25 serial1 = &ux500_serial1;
26 serial2 = &ux500_serial2;
27 };
21}; 28};
diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
index a4bc9e77d640..810cda743b6d 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi
+++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi
@@ -43,15 +43,26 @@
43 <&vaudio_hf_hrefv60_mode>, 43 <&vaudio_hf_hrefv60_mode>,
44 <&gbf_hrefv60_mode>, 44 <&gbf_hrefv60_mode>,
45 <&hdtv_hrefv60_mode>, 45 <&hdtv_hrefv60_mode>,
46 <&touch_hrefv60_mode>; 46 <&touch_hrefv60_mode>,
47 <&gpios_hrefv60_mode>;
47 48
48 sdi0 { 49 sdi0 {
49 /* SD card detect GPIO pin, extend default state */
50 sdi0_default_mode: sdi0_default { 50 sdi0_default_mode: sdi0_default {
51 /* SD card detect GPIO pin, extend default state */
51 default_hrefv60_cfg1 { 52 default_hrefv60_cfg1 {
52 pins = "GPIO95_E8"; 53 pins = "GPIO95_E8";
53 ste,config = <&gpio_in_pu>; 54 ste,config = <&gpio_in_pu>;
54 }; 55 };
56 /* VMMCI level-shifter enable */
57 default_hrefv60_cfg2 {
58 pins = "GPIO169_D22";
59 ste,config = <&gpio_out_lo>;
60 };
61 /* VMMCI level-shifter voltage select */
62 default_hrefv60_cfg3 {
63 pins = "GPIO5_AG6";
64 ste,config = <&gpio_out_hi>;
65 };
55 }; 66 };
56 }; 67 };
57 ipgpio { 68 ipgpio {
@@ -213,6 +224,16 @@
213 }; 224 };
214 }; 225 };
215 }; 226 };
227 gpios {
228 /* Dangling GPIO pins */
229 gpios_hrefv60_mode: gpios_hrefv60 {
230 default_cfg1 {
231 /* Normally UART1 RXD, now dangling */
232 pins = "GPIO4_AH6";
233 ste,config = <&in_pu>;
234 };
235 };
236 };
216 }; 237 };
217 }; 238 };
218}; 239};
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
17 }; 17 };
18 18
19 aliases { 19 aliases {
20 serial1 = &uart1;
20 stmpe-i2c0 = &stmpe0; 21 stmpe-i2c0 = &stmpe0;
21 stmpe-i2c1 = &stmpe1; 22 stmpe-i2c1 = &stmpe1;
22 }; 23 };
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; 15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
16 }; 16 };
17 17
18 aliases {
19 serial1 = &uart1;
20 };
21
18 src@101e0000 { 22 src@101e0000 {
 19	/* These crystal drivers are not used on this board */ 23	/* These crystal drivers are not used on this board */
 20	disable-sxtalo; 24	disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
757 clock-names = "uartclk", "apb_pclk"; 757 clock-names = "uartclk", "apb_pclk";
758 pinctrl-names = "default"; 758 pinctrl-names = "default";
759 pinctrl-0 = <&uart0_default_mux>; 759 pinctrl-0 = <&uart0_default_mux>;
760 status = "disabled";
760 }; 761 };
761 762
762 uart1: uart@101fb000 { 763 uart1: uart@101fb000 {
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 9edadc37719f..32a5ccb14e7e 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -18,6 +18,13 @@
18 model = "Calao Systems Snowball platform with device tree"; 18 model = "Calao Systems Snowball platform with device tree";
19 compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500"; 19 compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500";
20 20
 21	/* This stabilizes the serial port enumeration */
22 aliases {
23 serial0 = &ux500_serial0;
24 serial1 = &ux500_serial1;
25 serial2 = &ux500_serial2;
26 };
27
21 memory { 28 memory {
22 reg = <0x00000000 0x20000000>; 29 reg = <0x00000000 0x20000000>;
23 }; 30 };
@@ -223,11 +230,11 @@
223 status = "okay"; 230 status = "okay";
224 }; 231 };
225 232
233 /* This UART is unused and thus left disabled */
226 uart@80121000 { 234 uart@80121000 {
227 pinctrl-names = "default", "sleep"; 235 pinctrl-names = "default", "sleep";
228 pinctrl-0 = <&uart1_default_mode>; 236 pinctrl-0 = <&uart1_default_mode>;
229 pinctrl-1 = <&uart1_sleep_mode>; 237 pinctrl-1 = <&uart1_sleep_mode>;
230 status = "okay";
231 }; 238 };
232 239
233 uart@80007000 { 240 uart@80007000 {
@@ -452,7 +459,21 @@
452 pins = "GPIO21_AB3"; /* DAT31DIR */ 459 pins = "GPIO21_AB3"; /* DAT31DIR */
453 ste,config = <&out_hi>; 460 ste,config = <&out_hi>;
454 }; 461 };
455 462 /* SD card detect GPIO pin, extend default state */
463 snowball_cfg2 {
464 pins = "GPIO218_AH11";
465 ste,config = <&gpio_in_pu>;
466 };
467 /* VMMCI level-shifter enable */
468 snowball_cfg3 {
469 pins = "GPIO217_AH12";
470 ste,config = <&gpio_out_lo>;
471 };
472 /* VMMCI level-shifter voltage select */
473 snowball_cfg4 {
474 pins = "GPIO228_AJ6";
475 ste,config = <&gpio_out_hi>;
476 };
456 }; 477 };
457 }; 478 };
458 ssp0 { 479 ssp0 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 107395c32d82..17f63f7dfd9e 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -150,6 +150,16 @@
150 interface-type = "ace"; 150 interface-type = "ace";
151 reg = <0x5000 0x1000>; 151 reg = <0x5000 0x1000>;
152 }; 152 };
153
154 pmu@9000 {
155 compatible = "arm,cci-400-pmu,r0";
156 reg = <0x9000 0x5000>;
157 interrupts = <0 105 4>,
158 <0 101 4>,
159 <0 102 4>,
160 <0 103 4>,
161 <0 104 4>;
162 };
153 }; 163 };
154 164
155 memory-controller@7ffd0000 { 165 memory-controller@7ffd0000 {
@@ -187,11 +197,22 @@
187 <1 10 0xf08>; 197 <1 10 0xf08>;
188 }; 198 };
189 199
190 pmu { 200 pmu_a15 {
191 compatible = "arm,cortex-a15-pmu"; 201 compatible = "arm,cortex-a15-pmu";
192 interrupts = <0 68 4>, 202 interrupts = <0 68 4>,
193 <0 69 4>; 203 <0 69 4>;
194 interrupt-affinity = <&cpu0>, <&cpu1>; 204 interrupt-affinity = <&cpu0>,
205 <&cpu1>;
206 };
207
208 pmu_a7 {
209 compatible = "arm,cortex-a7-pmu";
210 interrupts = <0 128 4>,
211 <0 129 4>,
212 <0 130 4>;
213 interrupt-affinity = <&cpu2>,
214 <&cpu3>,
215 <&cpu4>;
195 }; 216 };
196 217
197 oscclk6a: oscclk6a { 218 oscclk6a: oscclk6a {
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6d83a1bf0c74..5fd8df6f50ea 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -353,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y
353CONFIG_POWER_RESET_GPIO=y 353CONFIG_POWER_RESET_GPIO=y
354CONFIG_POWER_RESET_GPIO_RESTART=y 354CONFIG_POWER_RESET_GPIO_RESTART=y
355CONFIG_POWER_RESET_KEYSTONE=y 355CONFIG_POWER_RESET_KEYSTONE=y
356CONFIG_POWER_RESET_SUN6I=y
357CONFIG_POWER_RESET_RMOBILE=y 356CONFIG_POWER_RESET_RMOBILE=y
358CONFIG_SENSORS_LM90=y 357CONFIG_SENSORS_LM90=y
359CONFIG_SENSORS_LM95245=y 358CONFIG_SENSORS_LM95245=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 8ecba00dcd83..7ebc346bf9fa 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -2,6 +2,7 @@ CONFIG_NO_HZ=y
2CONFIG_HIGH_RES_TIMERS=y 2CONFIG_HIGH_RES_TIMERS=y
3CONFIG_BLK_DEV_INITRD=y 3CONFIG_BLK_DEV_INITRD=y
4CONFIG_PERF_EVENTS=y 4CONFIG_PERF_EVENTS=y
5CONFIG_MODULES=y
5CONFIG_ARCH_SUNXI=y 6CONFIG_ARCH_SUNXI=y
6CONFIG_SMP=y 7CONFIG_SMP=y
7CONFIG_NR_CPUS=8 8CONFIG_NR_CPUS=8
@@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y
77CONFIG_GPIO_SYSFS=y 78CONFIG_GPIO_SYSFS=y
78CONFIG_POWER_SUPPLY=y 79CONFIG_POWER_SUPPLY=y
79CONFIG_POWER_RESET=y 80CONFIG_POWER_RESET=y
80CONFIG_POWER_RESET_SUN6I=y
81CONFIG_THERMAL=y 81CONFIG_THERMAL=y
82CONFIG_CPU_THERMAL=y 82CONFIG_CPU_THERMAL=y
83CONFIG_WATCHDOG=y 83CONFIG_WATCHDOG=y
@@ -87,6 +87,10 @@ CONFIG_REGULATOR=y
87CONFIG_REGULATOR_FIXED_VOLTAGE=y 87CONFIG_REGULATOR_FIXED_VOLTAGE=y
88CONFIG_REGULATOR_AXP20X=y 88CONFIG_REGULATOR_AXP20X=y
89CONFIG_REGULATOR_GPIO=y 89CONFIG_REGULATOR_GPIO=y
90CONFIG_FB=y
91CONFIG_FB_SIMPLE=y
92CONFIG_FRAMEBUFFER_CONSOLE=y
93CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
90CONFIG_USB=y 94CONFIG_USB=y
91CONFIG_USB_EHCI_HCD=y 95CONFIG_USB_EHCI_HCD=y
92CONFIG_USB_EHCI_HCD_PLATFORM=y 96CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 83c50193626c..30b3bc1666d2 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += kdebug.h
13generic-y += local.h 13generic-y += local.h
14generic-y += local64.h 14generic-y += local64.h
15generic-y += mcs_spinlock.h 15generic-y += mcs_spinlock.h
16generic-y += mm-arch-hooks.h
16generic-y += msgbuf.h 17generic-y += msgbuf.h
17generic-y += param.h 18generic-y += param.h
18generic-y += parport.h 19generic-y += parport.h
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1c3938f26beb..485982084fe9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
140 * The _caller variety takes a __builtin_return_address(0) value for 140 * The _caller variety takes a __builtin_return_address(0) value for
141 * /proc/vmalloc to use - and should only be used in non-inline functions. 141 * /proc/vmalloc to use - and should only be used in non-inline functions.
142 */ 142 */
143extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
144 size_t, unsigned int, void *);
145extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int, 143extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
146 void *); 144 void *);
147
148extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); 145extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
149extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
150extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached); 146extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
151extern void __iounmap(volatile void __iomem *addr); 147extern void __iounmap(volatile void __iomem *addr);
152extern void __arm_iounmap(volatile void __iomem *addr);
153 148
154extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, 149extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
155 unsigned int, void *); 150 unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
321static inline void memset_io(volatile void __iomem *dst, unsigned c, 316static inline void memset_io(volatile void __iomem *dst, unsigned c,
322 size_t count) 317 size_t count)
323{ 318{
324 memset((void __force *)dst, c, count); 319 extern void mmioset(void *, unsigned int, size_t);
320 mmioset((void __force *)dst, c, count);
325} 321}
326#define memset_io(dst,c,count) memset_io(dst,c,count) 322#define memset_io(dst,c,count) memset_io(dst,c,count)
327 323
328static inline void memcpy_fromio(void *to, const volatile void __iomem *from, 324static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
329 size_t count) 325 size_t count)
330{ 326{
331 memcpy(to, (const void __force *)from, count); 327 extern void mmiocpy(void *, const void *, size_t);
328 mmiocpy(to, (const void __force *)from, count);
332} 329}
333#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count) 330#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
334 331
335static inline void memcpy_toio(volatile void __iomem *to, const void *from, 332static inline void memcpy_toio(volatile void __iomem *to, const void *from,
336 size_t count) 333 size_t count)
337{ 334{
338 memcpy((void __force *)to, from, count); 335 extern void mmiocpy(void *, const void *, size_t);
336 mmiocpy((void __force *)to, from, count);
339} 337}
340#define memcpy_toio(to,from,count) memcpy_toio(to,from,count) 338#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
341 339
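The hunk above reroutes memset_io(), memcpy_fromio() and memcpy_toio() from the generic memset()/memcpy() to the new mmioset()/mmiocpy() entry points (wired up and exported in the memcpy.S, memset.S and armksyms.c hunks further down), so MMIO copies cannot be turned into unaligned or speculative accesses by an optimised string routine. A minimal, hypothetical driver fragment showing how these wrappers are normally consumed; the 0x100/0x104 offsets and the doorbell register are invented purely for illustration:

    #include <linux/io.h>
    #include <linux/types.h>

    /*
     * Push a command descriptor into a device mailbox window.
     * 'regs' must already be an ioremap()'d cookie; the offsets below
     * are placeholders, not a real device layout.
     */
    static void push_mailbox(void __iomem *regs, const void *desc, size_t len)
    {
            memcpy_toio(regs + 0x100, desc, len);   /* backed by mmiocpy() after this patch */
            writel(1, regs + 0x104);                /* ring the (invented) doorbell */
    }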
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
348#endif /* readl */ 346#endif /* readl */
349 347
350/* 348/*
351 * ioremap and friends. 349 * ioremap() and friends.
350 *
351 * ioremap() takes a resource address, and size. Due to the ARM memory
352 * types, it is important to use the correct ioremap() function as each
353 * mapping has specific properties.
354 *
355 * Function Memory type Cacheability Cache hint
356 * ioremap() Device n/a n/a
357 * ioremap_nocache() Device n/a n/a
358 * ioremap_cache() Normal Writeback Read allocate
359 * ioremap_wc() Normal Non-cacheable n/a
360 * ioremap_wt() Normal Non-cacheable n/a
361 *
362 * All device mappings have the following properties:
363 * - no access speculation
364 * - no repetition (eg, on return from an exception)
365 * - number, order and size of accesses are maintained
366 * - unaligned accesses are "unpredictable"
367 * - writes may be delayed before they hit the endpoint device
352 * 368 *
353 * ioremap takes a PCI memory address, as specified in 369 * ioremap_nocache() is the same as ioremap() as there are too many device
354 * Documentation/io-mapping.txt. 370 * drivers using this for device registers, and documentation which tells
371 * people to use it for such for this to be any different. This is not a
372 * safe fallback for memory-like mappings, or memory regions where the
373 * compiler may generate unaligned accesses - eg, via inlining its own
374 * memcpy.
355 * 375 *
376 * All normal memory mappings have the following properties:
377 * - reads can be repeated with no side effects
378 * - repeated reads return the last value written
379 * - reads can fetch additional locations without side effects
380 * - writes can be repeated (in certain cases) with no side effects
381 * - writes can be merged before accessing the target
382 * - unaligned accesses can be supported
383 * - ordering is not guaranteed without explicit dependencies or barrier
384 * instructions
385 * - writes may be delayed before they hit the endpoint memory
386 *
387 * The cache hint is only a performance hint: CPUs may alias these hints.
388 * Eg, a CPU not implementing read allocate but implementing write allocate
389 * will provide a write allocate mapping instead.
356 */ 390 */
357#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) 391void __iomem *ioremap(resource_size_t res_cookie, size_t size);
358#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) 392#define ioremap ioremap
359#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED) 393#define ioremap_nocache ioremap
360#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC) 394
361#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE) 395void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
362#define iounmap __arm_iounmap 396#define ioremap_cache ioremap_cache
397
398void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
399#define ioremap_wc ioremap_wc
400#define ioremap_wt ioremap_wc
401
402void iounmap(volatile void __iomem *iomem_cookie);
403#define iounmap iounmap
363 404
364/* 405/*
365 * io{read,write}{16,32}be() macros 406 * io{read,write}{16,32}be() macros
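The comment block and prototypes above replace the old __arm_ioremap() macro family with first-class ioremap(), ioremap_cache(), ioremap_wc() and iounmap() functions, with the table spelling out which memory type each variant maps. Purely as orientation, this is the usual call pattern against these prototypes; the base address and register layout are placeholders, not a real device:

    #include <linux/io.h>

    #define DEMO_PHYS_BASE  0x80120000UL    /* placeholder MMIO base */
    #define DEMO_WIN_SIZE   0x1000

    static u32 demo_read_id(void)
    {
            /* Device-type mapping: no speculation, access size/order preserved */
            void __iomem *base = ioremap(DEMO_PHYS_BASE, DEMO_WIN_SIZE);
            u32 id = 0;

            if (base) {
                    id = readl(base);       /* assume an ID register at offset 0 */
                    iounmap(base);
            }
            return id;
    }

For framebuffer-like normal memory, ioremap_wc() would be the variant to pick per the table above; as the comment notes, ioremap()/ioremap_nocache() is not a safe fallback for such memory-like regions.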
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 3a72d69b3255..b7f6fb462ea0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
275 */ 275 */
276#define __pa(x) __virt_to_phys((unsigned long)(x)) 276#define __pa(x) __virt_to_phys((unsigned long)(x))
277#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 277#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
278#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 278#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
279 279
280extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x); 280extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
281 281
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
286 */ 286 */
287static inline phys_addr_t __virt_to_idmap(unsigned long x) 287static inline phys_addr_t __virt_to_idmap(unsigned long x)
288{ 288{
289 if (arch_virt_to_idmap) 289 if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
290 return arch_virt_to_idmap(x); 290 return arch_virt_to_idmap(x);
291 else 291 else
292 return __virt_to_phys(x); 292 return __virt_to_phys(x);
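Both memory.h hunks above guard against 32-bit truncation: pfn_to_kaddr() now widens the pfn to phys_addr_t before shifting, so page frames above the 4 GiB boundary (possible with LPAE) no longer wrap modulo 2^32, and __virt_to_idmap() only consults arch_virt_to_idmap on MMU builds. A stand-alone arithmetic sketch of the first problem, with an invented pfn value:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t pfn = 0x140000;        /* frame just above 4 GiB (example value) */

            /* Old macro: the shift happens in 32 bits and wraps around. */
            uint32_t truncated = pfn << PAGE_SHIFT;                 /* 0x40000000, wrong */

            /* New macro: widen first; phys_addr_t is 64-bit with LPAE. */
            uint64_t widened = (uint64_t)pfn << PAGE_SHIFT;         /* 0x140000000, correct */

            printf("truncated=%#x widened=%#llx\n",
                   truncated, (unsigned long long)widened);
            return 0;
    }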
diff --git a/arch/arm/include/asm/mm-arch-hooks.h b/arch/arm/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 7056660c7cc4..000000000000
--- a/arch/arm/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ARM_MM_ARCH_HOOKS_H
13#define _ASM_ARM_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ARM_MM_ARCH_HOOKS_H */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e49a25..aeddd28b3595 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -129,7 +129,36 @@
129 129
130/* 130/*
131 * These are the memory types, defined to be compatible with 131 * These are the memory types, defined to be compatible with
132 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB 132 * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
133 * ARMv6+ without TEX remapping, they are a table index.
134 * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
135 *
136 * MT type Pre-ARMv6 ARMv6+ type / cacheable status
137 * UNCACHED Uncached Strongly ordered
138 * BUFFERABLE Bufferable Normal memory / non-cacheable
139 * WRITETHROUGH Writethrough Normal memory / write through
140 * WRITEBACK Writeback Normal memory / write back, read alloc
141 * MINICACHE Minicache N/A
142 * WRITEALLOC Writeback Normal memory / write back, write alloc
143 * DEV_SHARED Uncached Device memory (shared)
144 * DEV_NONSHARED Uncached Device memory (non-shared)
145 * DEV_WC Bufferable Normal memory / non-cacheable
146 * DEV_CACHED Writeback Normal memory / write back, read alloc
147 * VECTORS Variable Normal memory / variable
148 *
149 * All normal memory mappings have the following properties:
150 * - reads can be repeated with no side effects
151 * - repeated reads return the last value written
152 * - reads can fetch additional locations without side effects
153 * - writes can be repeated (in certain cases) with no side effects
154 * - writes can be merged before accessing the target
155 * - unaligned accesses can be supported
156 *
157 * All device mappings have the following properties:
158 * - no access speculation
159 * - no repetition (eg, on return from an exception)
160 * - number, order and size of accesses are maintained
161 * - unaligned accesses are "unpredictable"
133 */ 162 */
134#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ 163#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
135#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ 164#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671cfe1ff..5e5a51a99e68 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
50 50
51extern void fpundefinstr(void); 51extern void fpundefinstr(void);
52 52
53void mmioset(void *, unsigned int, size_t);
54void mmiocpy(void *, const void *, size_t);
55
53 /* platform dependent support */ 56 /* platform dependent support */
54EXPORT_SYMBOL(arm_delay_ops); 57EXPORT_SYMBOL(arm_delay_ops);
55 58
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
88EXPORT_SYMBOL(memchr); 91EXPORT_SYMBOL(memchr);
89EXPORT_SYMBOL(__memzero); 92EXPORT_SYMBOL(__memzero);
90 93
94EXPORT_SYMBOL(mmioset);
95EXPORT_SYMBOL(mmiocpy);
96
91#ifdef CONFIG_MMU 97#ifdef CONFIG_MMU
92EXPORT_SYMBOL(copy_page); 98EXPORT_SYMBOL(copy_page);
93 99
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac3086e361..cb4fb1e69778 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -410,7 +410,7 @@ ENDPROC(__fiq_abt)
410 zero_fp 410 zero_fp
411 411
412 .if \trace 412 .if \trace
413#ifdef CONFIG_IRQSOFF_TRACER 413#ifdef CONFIG_TRACE_IRQFLAGS
414 bl trace_hardirqs_off 414 bl trace_hardirqs_off
415#endif 415#endif
416 ct_user_exit save = 0 416 ct_user_exit save = 0
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..b48dd4f37f80 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
61 movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) 61 movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
62 ldmia sp, {r0 - r6} @ have to reload r0 - r6 62 ldmia sp, {r0 - r6} @ have to reload r0 - r6
63 b local_restart @ ... and off we go 63 b local_restart @ ... and off we go
64ENDPROC(ret_fast_syscall)
64 65
65/* 66/*
66 * "slow" syscall return path. "why" tells us if this was a real syscall. 67 * "slow" syscall return path. "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..29e2991465cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
399 sub lr, r4, r5 @ mmu has been enabled 399 sub lr, r4, r5 @ mmu has been enabled
400 add r3, r7, lr 400 add r3, r7, lr
401 ldrd r4, [r3, #0] @ get secondary_data.pgdir 401 ldrd r4, [r3, #0] @ get secondary_data.pgdir
402ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
403ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
404ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
402 ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir 405 ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
403 badr lr, __enable_mmu @ return address 406 badr lr, __enable_mmu @ return address
404 mov r13, r12 @ __secondary_switched address 407 mov r13, r12 @ __secondary_switched address
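The three ARM_BE8() instructions added above swap r4 and r5 on big-endian (BE8) kernels, where ldrd leaves the high and low words of the 64-bit secondary_data.pgdir in the opposite registers from what the following code expects; rather than burning a temporary register, the classic triple-XOR swap is used. The same identity in plain C, with arbitrary values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t a = 0x12345678, b = 0x9abcdef0;

            a ^= b; /* a = a0 ^ b0 */
            b ^= a; /* b = b0 ^ (a0 ^ b0) = a0 */
            a ^= b; /* a = (a0 ^ b0) ^ a0 = b0 */

            assert(a == 0x9abcdef0 && b == 0x12345678);
            return 0;
    }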
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 357f57ea83f4..54272e0be713 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -818,12 +818,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
818 if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) 818 if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
819 break; 819 break;
820 820
821 of_node_put(dn);
822 if (cpu >= nr_cpu_ids) { 821 if (cpu >= nr_cpu_ids) {
823 pr_warn("Failed to find logical CPU for %s\n", 822 pr_warn("Failed to find logical CPU for %s\n",
824 dn->name); 823 dn->name);
824 of_node_put(dn);
825 break; 825 break;
826 } 826 }
827 of_node_put(dn);
827 828
828 irqs[i] = cpu; 829 irqs[i] = cpu;
829 cpumask_set_cpu(cpu, &pmu->supported_cpus); 830 cpumask_set_cpu(cpu, &pmu->supported_cpus);
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 1a4d232796be..38269358fd25 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
50 flush_cache_all(); 50 flush_cache_all();
51 51
52 /* Switch to the identity mapping. */ 52 /* Switch to the identity mapping. */
53 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); 53 phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
54 phys_reset((unsigned long)addr); 54 phys_reset((unsigned long)addr);
55 55
56 /* Should never get here. */ 56 /* Should never get here. */
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbedfbfb8..3d6b7821cff8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
578 struct pt_regs *old_regs = set_irq_regs(regs); 578 struct pt_regs *old_regs = set_irq_regs(regs);
579 579
580 if ((unsigned)ipinr < NR_IPI) { 580 if ((unsigned)ipinr < NR_IPI) {
581 trace_ipi_entry(ipi_types[ipinr]); 581 trace_ipi_entry_rcuidle(ipi_types[ipinr]);
582 __inc_irq_stat(cpu, ipi_irqs[ipinr]); 582 __inc_irq_stat(cpu, ipi_irqs[ipinr]);
583 } 583 }
584 584
@@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
637 } 637 }
638 638
639 if ((unsigned)ipinr < NR_IPI) 639 if ((unsigned)ipinr < NR_IPI)
640 trace_ipi_exit(ipi_types[ipinr]); 640 trace_ipi_exit_rcuidle(ipi_types[ipinr]);
641 set_irq_regs(old_regs); 641 set_irq_regs(old_regs);
642} 642}
643 643
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
296 */ 296 */
297void update_vsyscall(struct timekeeper *tk) 297void update_vsyscall(struct timekeeper *tk)
298{ 298{
299 struct timespec xtime_coarse;
300 struct timespec64 *wtm = &tk->wall_to_monotonic; 299 struct timespec64 *wtm = &tk->wall_to_monotonic;
301 300
302 if (!cntvct_ok) { 301 if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
308 307
309 vdso_write_begin(vdso_data); 308 vdso_write_begin(vdso_data);
310 309
311 xtime_coarse = __current_kernel_time();
312 vdso_data->tk_is_cntvct = tk_is_cntvct(tk); 310 vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
313 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 311 vdso_data->xtime_coarse_sec = tk->xtime_sec;
314 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 312 vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
313 tk->tkr_mono.shift);
315 vdso_data->wtm_clock_sec = wtm->tv_sec; 314 vdso_data->wtm_clock_sec = wtm->tv_sec;
316 vdso_data->wtm_clock_nsec = wtm->tv_nsec; 315 vdso_data->wtm_clock_nsec = wtm->tv_nsec;
317 316
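The vdso.c hunk above stops sampling __current_kernel_time() and derives the coarse time directly from the timekeeper: tkr_mono.xtime_nsec is kept left-shifted by tkr_mono.shift for precision, so the nanosecond value handed to userspace is recovered by shifting it back down. A small sketch of that fixed-point step, with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Pretend the timekeeper keeps nanoseconds scaled up by 2^8. */
            uint32_t shift = 8;
            uint64_t xtime_nsec = (uint64_t)123456789 << shift;     /* stored, scaled */

            uint32_t coarse_nsec = (uint32_t)(xtime_nsec >> shift); /* 123456789 again */

            printf("coarse nanoseconds = %u\n", coarse_nsec);
            return 0;
    }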
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 7797e81e40e0..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,8 +61,10 @@
61 61
62/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ 62/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
63 63
64ENTRY(mmiocpy)
64ENTRY(memcpy) 65ENTRY(memcpy)
65 66
66#include "copy_template.S" 67#include "copy_template.S"
67 68
68ENDPROC(memcpy) 69ENDPROC(memcpy)
70ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a4ee97b5a2bf..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,7 @@
16 .text 16 .text
17 .align 5 17 .align 5
18 18
19ENTRY(mmioset)
19ENTRY(memset) 20ENTRY(memset)
20UNWIND( .fnstart ) 21UNWIND( .fnstart )
21 ands r3, r0, #3 @ 1 unaligned? 22 ands r3, r0, #3 @ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart )
133 b 1b 134 b 1b
134UNWIND( .fnend ) 135UNWIND( .fnend )
135ENDPROC(memset) 136ENDPROC(memset)
137ENDPROC(mmioset)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..4b39af2dfda9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
96 } 96 }
97 97
98 /* the mmap semaphore is taken only if not in an atomic context */ 98 /* the mmap semaphore is taken only if not in an atomic context */
99 atomic = in_atomic(); 99 atomic = faulthandler_disabled();
100 100
101 if (!atomic) 101 if (!atomic)
102 down_read(&current->mm->mmap_sem); 102 down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c9d136..4a87e86dec45 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
146 pd->base = of_iomap(np, 0); 146 pd->base = of_iomap(np, 0);
147 if (!pd->base) { 147 if (!pd->base) {
148 pr_warn("%s: failed to map memory\n", __func__); 148 pr_warn("%s: failed to map memory\n", __func__);
149 kfree(pd->pd.name); 149 kfree_const(pd->pd.name);
150 kfree(pd); 150 kfree(pd);
151 of_node_put(np);
152 continue; 151 continue;
153 } 152 }
154 153
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 80bad29d609a..8c4467fad837 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
291 } 291 }
292} 292}
293 293
294#ifdef CONFIG_PM_GENERIC_DOMAINS
295
296static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd) 294static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
297{ 295{
298 int iso, iso2sw; 296 int iso, iso2sw;
@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
399static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg) 397static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
400{ 398{
401 struct clk *clk; 399 struct clk *clk;
402 bool is_off;
403 int i; 400 int i;
404 401
405 imx6q_pu_domain.reg = pu_reg; 402 imx6q_pu_domain.reg = pu_reg;
@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
416 } 413 }
417 imx6q_pu_domain.num_clks = i; 414 imx6q_pu_domain.num_clks = i;
418 415
419 is_off = IS_ENABLED(CONFIG_PM); 416 /* Enable power always in case bootloader disabled it. */
420 if (is_off) { 417 imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
421 _imx6q_pm_pu_power_off(&imx6q_pu_domain.base); 418
422 } else { 419 if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
423 /* 420 return 0;
424 * Enable power if compiled without CONFIG_PM in case the
425 * bootloader disabled it.
426 */
427 imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
428 }
429 421
430 pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off); 422 pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
431 return of_genpd_add_provider_onecell(dev->of_node, 423 return of_genpd_add_provider_onecell(dev->of_node,
432 &imx_gpc_onecell_data); 424 &imx_gpc_onecell_data);
433 425
@@ -437,13 +429,6 @@ clk_err:
437 return -EINVAL; 429 return -EINVAL;
438} 430}
439 431
440#else
441static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
442{
443 return 0;
444}
445#endif /* CONFIG_PM_GENERIC_DOMAINS */
446
447static int imx_gpc_probe(struct platform_device *pdev) 432static int imx_gpc_probe(struct platform_device *pdev)
448{ 433{
449 struct regulator *pu_reg; 434 struct regulator *pu_reg;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index ecc04ff13e95..4a023e8d1bdb 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -60,6 +60,7 @@ config SOC_AM43XX
60 select ARM_GIC 60 select ARM_GIC
61 select MACH_OMAP_GENERIC 61 select MACH_OMAP_GENERIC
62 select MIGHT_HAVE_CACHE_L2X0 62 select MIGHT_HAVE_CACHE_L2X0
63 select HAVE_ARM_SCU
63 64
64config SOC_DRA7XX 65config SOC_DRA7XX
65 bool "TI DRA7XX" 66 bool "TI DRA7XX"
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index e1a56d87599e..1ed4be184a29 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void)
117 u8 revision = dma_read(REVISION, 0) & 0xff; 117 u8 revision = dma_read(REVISION, 0) & 0xff;
118 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", 118 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
119 revision >> 4, revision & 0xf); 119 revision >> 4, revision & 0xf);
120 return;
121} 120}
122 121
123static unsigned configure_dma_errata(void) 122static unsigned configure_dma_errata(void)
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621b5a6b..e1d2e991d17a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
392 .irq_mask = wakeupgen_mask, 392 .irq_mask = wakeupgen_mask,
393 .irq_unmask = wakeupgen_unmask, 393 .irq_unmask = wakeupgen_unmask,
394 .irq_retrigger = irq_chip_retrigger_hierarchy, 394 .irq_retrigger = irq_chip_retrigger_hierarchy,
395 .irq_set_type = irq_chip_set_type_parent,
395 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, 396 .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
396#ifdef CONFIG_SMP 397#ifdef CONFIG_SMP
397 .irq_set_affinity = irq_chip_set_affinity_parent, 398 .irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
2373 * registers. This address is needed early so the OCP registers that 2373 * registers. This address is needed early so the OCP registers that
2374 * are part of the device's address space can be ioremapped properly. 2374 * are part of the device's address space can be ioremapped properly.
2375 * 2375 *
2376 * If SYSC access is not needed, the registers will not be remapped
2377 * and non-availability of MPU access is not treated as an error.
2378 *
2376 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and 2379 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
2377 * -ENXIO on absent or invalid register target address space. 2380 * -ENXIO on absent or invalid register target address space.
2378 */ 2381 */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2387 2390
2388 _save_mpu_port_index(oh); 2391 _save_mpu_port_index(oh);
2389 2392
2393 /* if we don't need sysc access we don't need to ioremap */
2394 if (!oh->class->sysc)
2395 return 0;
2396
2397 /* we can't continue without MPU PORT if we need sysc access */
2390 if (oh->_int_flags & _HWMOD_NO_MPU_PORT) 2398 if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
2391 return -ENXIO; 2399 return -ENXIO;
2392 2400
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2396 oh->name); 2404 oh->name);
2397 2405
2398 /* Extract the IO space from device tree blob */ 2406 /* Extract the IO space from device tree blob */
2399 if (!np) 2407 if (!np) {
2408 pr_err("omap_hwmod: %s: no dt node\n", oh->name);
2400 return -ENXIO; 2409 return -ENXIO;
2410 }
2401 2411
2402 va_start = of_iomap(np, index + oh->mpu_rt_idx); 2412 va_start = of_iomap(np, index + oh->mpu_rt_idx);
2403 } else { 2413 } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
2456 oh->name, np->name); 2466 oh->name, np->name);
2457 } 2467 }
2458 2468
2459 if (oh->class->sysc) { 2469 r = _init_mpu_rt_base(oh, NULL, index, np);
2460 r = _init_mpu_rt_base(oh, NULL, index, np); 2470 if (r < 0) {
2461 if (r < 0) { 2471 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
2462 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", 2472 oh->name);
2463 oh->name); 2473 return 0;
2464 return 0;
2465 }
2466 } 2474 }
2467 2475
2468 r = _init_clocks(oh, NULL); 2476 r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
827 .syss_offs = 0x0014, 827 .syss_offs = 0x0014,
828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | 828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), 829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
831 SIDLE_SMART_WKUP),
832 .sysc_fields = &omap_hwmod_sysc_type1, 831 .sysc_fields = &omap_hwmod_sysc_type1,
833}; 832};
834 833
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
844 .class = &dra7xx_gpmc_hwmod_class, 843 .class = &dra7xx_gpmc_hwmod_class,
845 .clkdm_name = "l3main1_clkdm", 844 .clkdm_name = "l3main1_clkdm",
846 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ 845 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
847 .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, 846 .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
848 .main_clk = "l3_iclk_div", 847 .main_clk = "l3_iclk_div",
849 .prcm = { 848 .prcm = {
850 .omap4 = { 849 .omap4 = {
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index e03d8b5c9ad0..9ab8932403e5 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -4,6 +4,7 @@ menuconfig ARCH_SIRF
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
5 select GENERIC_IRQ_CHIP 5 select GENERIC_IRQ_CHIP
6 select NO_IOPORT_MAP 6 select NO_IOPORT_MAP
7 select REGMAP
7 select PINCTRL 8 select PINCTRL
8 select PINCTRL_SIRF 9 select PINCTRL_SIRF
9 help 10 help
diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c
index 8f66d8f7ca75..d4852d24dc7d 100644
--- a/arch/arm/mach-prima2/rtciobrg.c
+++ b/arch/arm/mach-prima2/rtciobrg.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * RTC I/O Bridge interfaces for CSR SiRFprimaII 2 * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
3 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module 3 * ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
4 * 4 *
5 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 5 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/regmap.h>
13#include <linux/of.h> 14#include <linux/of.h>
14#include <linux/of_address.h> 15#include <linux/of_address.h>
15#include <linux/of_device.h> 16#include <linux/of_device.h>
@@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr)
66{ 67{
67 unsigned long flags, val; 68 unsigned long flags, val;
68 69
70 /* TODO: add hwspinlock to sync with M3 */
69 spin_lock_irqsave(&rtciobrg_lock, flags); 71 spin_lock_irqsave(&rtciobrg_lock, flags);
70 72
71 val = __sirfsoc_rtc_iobrg_readl(addr); 73 val = __sirfsoc_rtc_iobrg_readl(addr);
@@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
90{ 92{
91 unsigned long flags; 93 unsigned long flags;
92 94
95 /* TODO: add hwspinlock to sync with M3 */
93 spin_lock_irqsave(&rtciobrg_lock, flags); 96 spin_lock_irqsave(&rtciobrg_lock, flags);
94 97
95 sirfsoc_rtc_iobrg_pre_writel(val, addr); 98 sirfsoc_rtc_iobrg_pre_writel(val, addr);
@@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
102} 105}
103EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel); 106EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
104 107
108
109static int regmap_iobg_regwrite(void *context, unsigned int reg,
110 unsigned int val)
111{
112 sirfsoc_rtc_iobrg_writel(val, reg);
113 return 0;
114}
115
116static int regmap_iobg_regread(void *context, unsigned int reg,
117 unsigned int *val)
118{
119 *val = (u32)sirfsoc_rtc_iobrg_readl(reg);
120 return 0;
121}
122
123static struct regmap_bus regmap_iobg = {
124 .reg_write = regmap_iobg_regwrite,
125 .reg_read = regmap_iobg_regread,
126};
127
128/**
129 * devm_regmap_init_iobg(): Initialise managed register map
130 *
 131 * @dev: Device that will be interacted with
132 * @config: Configuration for register map
133 *
134 * The return value will be an ERR_PTR() on error or a valid pointer
135 * to a struct regmap. The regmap will be automatically freed by the
136 * device management code.
137 */
138struct regmap *devm_regmap_init_iobg(struct device *dev,
139 const struct regmap_config *config)
140{
141 const struct regmap_bus *bus = &regmap_iobg;
142
143 return devm_regmap_init(dev, bus, dev, config);
144}
145EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
146
105static const struct of_device_id rtciobrg_ids[] = { 147static const struct of_device_id rtciobrg_ids[] = {
106 { .compatible = "sirf,prima2-rtciobg" }, 148 { .compatible = "sirf,prima2-rtciobg" },
107 {} 149 {}
@@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void)
132} 174}
133postcore_initcall(sirfsoc_rtciobrg_init); 175postcore_initcall(sirfsoc_rtciobrg_init);
134 176
135MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, " 177MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
136 "Barry Song <baohua.song@csr.com>"); 178MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
137MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge"); 179MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
138MODULE_LICENSE("GPL v2"); 180MODULE_LICENSE("GPL v2");
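
The hunk above turns the RTC I/O bridge into a regmap bus provider. As a rough sketch of how a consumer driver could use it (the driver name, probe shape and register widths are assumptions for illustration, not part of this patch), a probe might look like:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	/* illustrative: 32-bit registers reached through the RTC I/O bridge */
	static const struct regmap_config example_iobg_regmap_config = {
		.reg_bits = 32,
		.val_bits = 32,
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct regmap *map;

		map = devm_regmap_init_iobg(&pdev->dev,
					    &example_iobg_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* regmap_read()/regmap_write() now route through the iobrg helpers */
		return 0;
	}
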
diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
index c092730749b9..bf366b39fa61 100644
--- a/arch/arm/mach-pxa/capc7117.c
+++ b/arch/arm/mach-pxa/capc7117.c
@@ -24,6 +24,7 @@
24#include <linux/ata_platform.h> 24#include <linux/ata_platform.h>
25#include <linux/serial_8250.h> 25#include <linux/serial_8250.h>
26#include <linux/gpio.h> 26#include <linux/gpio.h>
27#include <linux/regulator/machine.h>
27 28
28#include <asm/mach-types.h> 29#include <asm/mach-types.h>
29#include <asm/mach/arch.h> 30#include <asm/mach/arch.h>
@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
144 145
145 capc7117_uarts_init(); 146 capc7117_uarts_init();
146 capc7117_ide_init(); 147 capc7117_ide_init();
148
149 regulator_has_full_constraints();
147} 150}
148 151
149MACHINE_START(CAPC7117, 152MACHINE_START(CAPC7117,
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index bb99f59a36d8..a17a91eb8e9a 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -13,6 +13,7 @@
13#include <linux/syscore_ops.h> 13#include <linux/syscore_ops.h>
14#include <linux/irq.h> 14#include <linux/irq.h>
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/regulator/machine.h>
16 17
17#include <linux/dm9000.h> 18#include <linux/dm9000.h>
18#include <linux/leds.h> 19#include <linux/leds.h>
@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
466 cmx2xx_init_ac97(); 467 cmx2xx_init_ac97();
467 cmx2xx_init_touchscreen(); 468 cmx2xx_init_touchscreen();
468 cmx2xx_init_leds(); 469 cmx2xx_init_leds();
470
471 regulator_has_full_constraints();
469} 472}
470 473
471static void __init cmx2xx_init_irq(void) 474static void __init cmx2xx_init_irq(void)
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 4d3588d26c2a..5851f4c254c1 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
835 cm_x300_init_ac97(); 835 cm_x300_init_ac97();
836 cm_x300_init_wi2wi(); 836 cm_x300_init_wi2wi();
837 cm_x300_init_bl(); 837 cm_x300_init_bl();
838
839 regulator_has_full_constraints();
838} 840}
839 841
840static void __init cm_x300_fixup(struct tag *tags, char **cmdline) 842static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index 5f9d9303b346..3503826333c7 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -18,6 +18,7 @@
18#include <linux/mtd/partitions.h> 18#include <linux/mtd/partitions.h>
19#include <linux/mtd/physmap.h> 19#include <linux/mtd/physmap.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/regulator/machine.h>
21#include <linux/ucb1400.h> 22#include <linux/ucb1400.h>
22 23
23#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
294 printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n", 295 printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
295 colibri_pxa270_baseboard); 296 colibri_pxa270_baseboard);
296 } 297 }
298
299 regulator_has_full_constraints();
297} 300}
298 301
299/* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either 302/* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 51531ecffca8..9d7072b04045 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
1306 em_x270_init_i2c(); 1306 em_x270_init_i2c();
1307 em_x270_init_camera(); 1307 em_x270_init_camera();
1308 em_x270_userspace_consumers_init(); 1308 em_x270_userspace_consumers_init();
1309
1310 regulator_has_full_constraints();
1309} 1311}
1310 1312
1311MACHINE_START(EM_X270, "Compulab EM-X270") 1313MACHINE_START(EM_X270, "Compulab EM-X270")
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index c98511c5abd1..9b0eb0252af6 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -26,6 +26,7 @@
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/spi/pxa2xx_spi.h> 27#include <linux/spi/pxa2xx_spi.h>
28#include <linux/can/platform/mcp251x.h> 28#include <linux/can/platform/mcp251x.h>
29#include <linux/regulator/machine.h>
29 30
30#include "generic.h" 31#include "generic.h"
31 32
@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
185 mxm_8x10_mmc_init(); 186 mxm_8x10_mmc_init();
186 187
187 icontrol_can_init(); 188 icontrol_can_init();
189
190 regulator_has_full_constraints();
188} 191}
189 192
190MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM") 193MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 872dcb20e757..066e3a250ee0 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -26,6 +26,7 @@
26#include <linux/dm9000.h> 26#include <linux/dm9000.h>
27#include <linux/mtd/physmap.h> 27#include <linux/mtd/physmap.h>
28#include <linux/mtd/partitions.h> 28#include <linux/mtd/partitions.h>
29#include <linux/regulator/machine.h>
29#include <linux/i2c/pxa-i2c.h> 30#include <linux/i2c/pxa-i2c.h>
30 31
31#include <asm/types.h> 32#include <asm/types.h>
@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
534 535
535 BCR_writew(trizeps_conxs_bcr); 536 BCR_writew(trizeps_conxs_bcr);
536 board_backlight_power(1); 537 board_backlight_power(1);
538
539 regulator_has_full_constraints();
537} 540}
538 541
539static void __init trizeps4_map_io(void) 542static void __init trizeps4_map_io(void)
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index aa89488f961e..54122a983ae3 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -24,6 +24,7 @@
24#include <linux/dm9000.h> 24#include <linux/dm9000.h>
25#include <linux/ucb1400.h> 25#include <linux/ucb1400.h>
26#include <linux/ata_platform.h> 26#include <linux/ata_platform.h>
27#include <linux/regulator/machine.h>
27#include <linux/regulator/max1586.h> 28#include <linux/regulator/max1586.h>
28#include <linux/i2c/pxa-i2c.h> 29#include <linux/i2c/pxa-i2c.h>
29 30
@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
711 vpac270_ts_init(); 712 vpac270_ts_init();
712 vpac270_rtc_init(); 713 vpac270_rtc_init();
713 vpac270_ide_init(); 714 vpac270_ide_init();
715
716 regulator_has_full_constraints();
714} 717}
715 718
716MACHINE_START(VPAC270, "Voipac PXA270") 719MACHINE_START(VPAC270, "Voipac PXA270")
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index ac2ae5c71ab4..6158566fa0f7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -868,6 +868,8 @@ static void __init zeus_init(void)
868 i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices)); 868 i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
869 pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info); 869 pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
870 spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info)); 870 spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
871
872 regulator_has_full_constraints();
871} 873}
872 874
873static struct map_desc zeus_io_desc[] __initdata = { 875static struct map_desc zeus_io_desc[] __initdata = {
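
Every PXA board file touched above gains the same trailing call in its init function. The point of the call is that the regulator core then knows the board has declared all of its supplies, so a consumer whose supply is not mapped gets a dummy regulator or an error instead of deferring probe forever. A minimal sketch of the pattern (the board name is illustrative):

	#include <linux/regulator/machine.h>

	static void __init example_board_init(void)
	{
		/* ... register the board's platform devices ... */

		/* all regulator constraints for this board are now declared */
		regulator_has_full_constraints();
	}
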
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index a99d90a4d09c..06640914d9a0 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009-2012 ST Microelectronics 4 * Copyright (C) 2009-2012 ST Microelectronics
5 * Rajeev Kumar <rajeev-dlh.kumar@st.com> 5 * Rajeev Kumar <rajeev-dlh.kumar@st.com>
6 * Viresh Kumar <viresh.linux@gmail.com> 6 * Viresh Kumar <vireshk@kernel.org>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/irqs.h b/arch/arm/mach-spear/include/mach/irqs.h
index 92da0a8c6bce..7058720c5278 100644
--- a/arch/arm/mach-spear/include/mach/irqs.h
+++ b/arch/arm/mach-spear/include/mach/irqs.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009-2012 ST Microelectronics 4 * Copyright (C) 2009-2012 ST Microelectronics
5 * Rajeev Kumar <rajeev-dlh.kumar@st.com> 5 * Rajeev Kumar <rajeev-dlh.kumar@st.com>
6 * Viresh Kumar <viresh.linux@gmail.com> 6 * Viresh Kumar <vireshk@kernel.org>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/misc_regs.h b/arch/arm/mach-spear/include/mach/misc_regs.h
index 935639ce59ba..cfaf7c665b58 100644
--- a/arch/arm/mach-spear/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
4 * Miscellaneous registers definitions for SPEAr3xx machine family 4 * Miscellaneous registers definitions for SPEAr3xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/spear.h b/arch/arm/mach-spear/include/mach/spear.h
index f2d6a0176575..5ed841ccf8a3 100644
--- a/arch/arm/mach-spear/include/mach/spear.h
+++ b/arch/arm/mach-spear/include/mach/spear.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2009,2012 ST Microelectronics 4 * Copyright (C) 2009,2012 ST Microelectronics
5 * Rajeev Kumar<rajeev-dlh.kumar@st.com> 5 * Rajeev Kumar<rajeev-dlh.kumar@st.com>
6 * Viresh Kumar <viresh.linux@gmail.com> 6 * Viresh Kumar <vireshk@kernel.org>
7 * 7 *
8 * This file is licensed under the terms of the GNU General Public 8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any 9 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/include/mach/uncompress.h b/arch/arm/mach-spear/include/mach/uncompress.h
index 51b2dc93e4da..8439b9c12edb 100644
--- a/arch/arm/mach-spear/include/mach/uncompress.h
+++ b/arch/arm/mach-spear/include/mach/uncompress.h
@@ -4,7 +4,7 @@
4 * Serial port stubs for kernel decompress status messages 4 * Serial port stubs for kernel decompress status messages
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/pl080.c b/arch/arm/mach-spear/pl080.c
index cfa1199d0f4a..b4529f3e0ee9 100644
--- a/arch/arm/mach-spear/pl080.c
+++ b/arch/arm/mach-spear/pl080.c
@@ -4,7 +4,7 @@
4 * DMAC pl080 definitions for SPEAr platform 4 * DMAC pl080 definitions for SPEAr platform
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/pl080.h b/arch/arm/mach-spear/pl080.h
index eb6590ded40d..608dec6725ae 100644
--- a/arch/arm/mach-spear/pl080.h
+++ b/arch/arm/mach-spear/pl080.h
@@ -4,7 +4,7 @@
4 * DMAC pl080 definitions for SPEAr platform 4 * DMAC pl080 definitions for SPEAr platform
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/restart.c b/arch/arm/mach-spear/restart.c
index ce5e098c4888..b4342155a783 100644
--- a/arch/arm/mach-spear/restart.c
+++ b/arch/arm/mach-spear/restart.c
@@ -4,7 +4,7 @@
4 * SPEAr platform specific restart functions 4 * SPEAr platform specific restart functions
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear1310.c b/arch/arm/mach-spear/spear1310.c
index d9ce4d8000f0..cd5d375d91f0 100644
--- a/arch/arm/mach-spear/spear1310.c
+++ b/arch/arm/mach-spear/spear1310.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine source file 4 * SPEAr1310 machine source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear1340.c b/arch/arm/mach-spear/spear1340.c
index 3f3c0f124bd3..94594d5a446c 100644
--- a/arch/arm/mach-spear/spear1340.c
+++ b/arch/arm/mach-spear/spear1340.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine source file 4 * SPEAr1340 machine source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear13xx.c b/arch/arm/mach-spear/spear13xx.c
index 2e463a93468d..b7afce6795f4 100644
--- a/arch/arm/mach-spear/spear13xx.c
+++ b/arch/arm/mach-spear/spear13xx.c
@@ -4,7 +4,7 @@
4 * SPEAr13XX machines common source file 4 * SPEAr13XX machines common source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear300.c b/arch/arm/mach-spear/spear300.c
index b52e48f342f4..5b32edda2276 100644
--- a/arch/arm/mach-spear/spear300.c
+++ b/arch/arm/mach-spear/spear300.c
@@ -4,7 +4,7 @@
4 * SPEAr300 machine source file 4 * SPEAr300 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear310.c b/arch/arm/mach-spear/spear310.c
index ed2029db391f..86a44ac7ff67 100644
--- a/arch/arm/mach-spear/spear310.c
+++ b/arch/arm/mach-spear/spear310.c
@@ -4,7 +4,7 @@
4 * SPEAr310 machine source file 4 * SPEAr310 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c
index bf634b32a930..d45d751926c5 100644
--- a/arch/arm/mach-spear/spear320.c
+++ b/arch/arm/mach-spear/spear320.c
@@ -4,7 +4,7 @@
4 * SPEAr320 machine source file 4 * SPEAr320 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index bf3b1fd8cb23..23394ac76cf2 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -4,7 +4,7 @@
4 * SPEAr3XX machines common source file 4 * SPEAr3XX machines common source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 81502b90dd91..4efe2d43a126 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -35,7 +35,7 @@ config MACH_SUN7I
35 select SUN5I_HSTIMER 35 select SUN5I_HSTIMER
36 36
37config MACH_SUN8I 37config MACH_SUN8I
38 bool "Allwinner A23 (sun8i) SoCs support" 38 bool "Allwinner sun8i Family SoCs support"
39 default ARCH_SUNXI 39 default ARCH_SUNXI
40 select ARM_GIC 40 select ARM_GIC
41 select MFD_SUN6I_PRCM 41 select MFD_SUN6I_PRCM
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 1bc811a74a9f..65bab2876343 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -67,10 +67,13 @@ MACHINE_END
67 67
68static const char * const sun8i_board_dt_compat[] = { 68static const char * const sun8i_board_dt_compat[] = {
69 "allwinner,sun8i-a23", 69 "allwinner,sun8i-a23",
70 "allwinner,sun8i-a33",
71 "allwinner,sun8i-h3",
70 NULL, 72 NULL,
71}; 73};
72 74
73DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family") 75DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
76 .init_time = sun6i_timer_init,
74 .dt_compat = sun8i_board_dt_compat, 77 .dt_compat = sun8i_board_dt_compat,
75 .init_late = sunxi_dt_cpufreq_init, 78 .init_late = sunxi_dt_cpufreq_init,
76MACHINE_END 79MACHINE_END
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0f7a52..cba12f34ff77 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1971,7 +1971,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
1971{ 1971{
1972 int next_bitmap; 1972 int next_bitmap;
1973 1973
1974 if (mapping->nr_bitmaps > mapping->extensions) 1974 if (mapping->nr_bitmaps >= mapping->extensions)
1975 return -EINVAL; 1975 return -EINVAL;
1976 1976
1977 next_bitmap = mapping->nr_bitmaps; 1977 next_bitmap = mapping->nr_bitmaps;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7ab3bc..0c81056c1dd7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
255} 255}
256#endif 256#endif
257 257
258void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, 258static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
259 unsigned long offset, size_t size, unsigned int mtype, void *caller) 259 unsigned long offset, size_t size, unsigned int mtype, void *caller)
260{ 260{
261 const struct mem_type *type; 261 const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
363 unsigned int mtype) 363 unsigned int mtype)
364{ 364{
365 return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, 365 return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
366 __builtin_return_address(0)); 366 __builtin_return_address(0));
367} 367}
368EXPORT_SYMBOL(__arm_ioremap_pfn); 368EXPORT_SYMBOL(__arm_ioremap_pfn);
369 369
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
371 unsigned int, void *) = 371 unsigned int, void *) =
372 __arm_ioremap_caller; 372 __arm_ioremap_caller;
373 373
374void __iomem * 374void __iomem *ioremap(resource_size_t res_cookie, size_t size)
375__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) 375{
376 return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
377 __builtin_return_address(0));
378}
379EXPORT_SYMBOL(ioremap);
380
381void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
382{
383 return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
384 __builtin_return_address(0));
385}
386EXPORT_SYMBOL(ioremap_cache);
387
388void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
376{ 389{
377 return arch_ioremap_caller(phys_addr, size, mtype, 390 return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
378 __builtin_return_address(0)); 391 __builtin_return_address(0));
379} 392}
380EXPORT_SYMBOL(__arm_ioremap); 393EXPORT_SYMBOL(ioremap_wc);
381 394
382/* 395/*
383 * Remap an arbitrary physical address space into the kernel virtual 396 * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
431 444
432void (*arch_iounmap)(volatile void __iomem *) = __iounmap; 445void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
433 446
434void __arm_iounmap(volatile void __iomem *io_addr) 447void iounmap(volatile void __iomem *cookie)
435{ 448{
436 arch_iounmap(io_addr); 449 arch_iounmap(cookie);
437} 450}
438EXPORT_SYMBOL(__arm_iounmap); 451EXPORT_SYMBOL(iounmap);
439 452
440#ifdef CONFIG_PCI 453#ifdef CONFIG_PCI
441static int pci_ioremap_mem_type = MT_DEVICE; 454static int pci_ioremap_mem_type = MT_DEVICE;
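
With the hunk above, ioremap(), ioremap_cache(), ioremap_wc() and iounmap() become real exported functions on ARM rather than wrappers around __arm_ioremap()/__arm_iounmap(). A minimal caller sketch follows; the physical base, size and register offset are made up for illustration:

	#include <linux/errno.h>
	#include <linux/io.h>

	#define EXAMPLE_PHYS_BASE	0x40000000UL
	#define EXAMPLE_REG_SIZE	0x1000

	static int example_map_and_poke(void)
	{
		void __iomem *regs;

		regs = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_REG_SIZE); /* MT_DEVICE */
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x10);	/* device-memory register write */
		iounmap(regs);
		return 0;
	}
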
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f..870838a46d52 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void)
1072 int highmem = 0; 1072 int highmem = 0;
1073 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; 1073 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
1074 struct memblock_region *reg; 1074 struct memblock_region *reg;
1075 bool should_use_highmem = false;
1075 1076
1076 for_each_memblock(memory, reg) { 1077 for_each_memblock(memory, reg) {
1077 phys_addr_t block_start = reg->base; 1078 phys_addr_t block_start = reg->base;
@@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void)
1090 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", 1091 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1091 &block_start, &block_end); 1092 &block_start, &block_end);
1092 memblock_remove(reg->base, reg->size); 1093 memblock_remove(reg->base, reg->size);
1094 should_use_highmem = true;
1093 continue; 1095 continue;
1094 } 1096 }
1095 1097
@@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void)
1100 &block_start, &block_end, &vmalloc_limit); 1102 &block_start, &block_end, &vmalloc_limit);
1101 memblock_remove(vmalloc_limit, overlap_size); 1103 memblock_remove(vmalloc_limit, overlap_size);
1102 block_end = vmalloc_limit; 1104 block_end = vmalloc_limit;
1105 should_use_highmem = true;
1103 } 1106 }
1104 } 1107 }
1105 1108
@@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void)
1134 } 1137 }
1135 } 1138 }
1136 1139
1140 if (should_use_highmem)
1141 pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1142
1137 high_memory = __va(arm_lowmem_limit - 1) + 1; 1143 high_memory = __va(arm_lowmem_limit - 1) + 1;
1138 1144
1139 /* 1145 /*
@@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc)
1494 build_mem_type_table(); 1500 build_mem_type_table();
1495 prepare_page_table(); 1501 prepare_page_table();
1496 map_lowmem(); 1502 map_lowmem();
1503 memblock_set_current_limit(arm_lowmem_limit);
1497 dma_contiguous_remap(); 1504 dma_contiguous_remap();
1498 devicemaps_init(mdesc); 1505 devicemaps_init(mdesc);
1499 kmap_init(); 1506 kmap_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index afd7e05d95f1..1dd10936d68d 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
351} 351}
352EXPORT_SYMBOL(__arm_ioremap_pfn); 352EXPORT_SYMBOL(__arm_ioremap_pfn);
353 353
354void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, 354void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
355 size_t size, unsigned int mtype, void *caller) 355 unsigned int mtype, void *caller)
356{ 356{
357 return __arm_ioremap_pfn(pfn, offset, size, mtype); 357 return (void __iomem *)phys_addr;
358} 358}
359 359
360void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, 360void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
361 unsigned int mtype) 361
362void __iomem *ioremap(resource_size_t res_cookie, size_t size)
362{ 363{
363 return (void __iomem *)phys_addr; 364 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
365 __builtin_return_address(0));
364} 366}
365EXPORT_SYMBOL(__arm_ioremap); 367EXPORT_SYMBOL(ioremap);
366 368
367void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); 369void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
370{
371 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
372 __builtin_return_address(0));
373}
374EXPORT_SYMBOL(ioremap_cache);
368 375
369void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, 376void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
370 unsigned int mtype, void *caller) 377{
378 return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
379 __builtin_return_address(0));
380}
381EXPORT_SYMBOL(ioremap_wc);
382
383void __iounmap(volatile void __iomem *addr)
371{ 384{
372 return __arm_ioremap(phys_addr, size, mtype);
373} 385}
386EXPORT_SYMBOL(__iounmap);
374 387
375void (*arch_iounmap)(volatile void __iomem *); 388void (*arch_iounmap)(volatile void __iomem *);
376 389
377void __arm_iounmap(volatile void __iomem *addr) 390void iounmap(volatile void __iomem *addr)
378{ 391{
379} 392}
380EXPORT_SYMBOL(__arm_iounmap); 393EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0716bbe19872..de2b246fed38 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
274__v7_b15mp_setup: 274__v7_b15mp_setup:
275__v7_ca17mp_setup: 275__v7_ca17mp_setup:
276 mov r10, #0 276 mov r10, #0
2771: 2771: adr r12, __v7_setup_stack @ the local stack
278 stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
279 bl v7_invalidate_l1
280 ldmia r12, {r0-r5, lr}
278#ifdef CONFIG_SMP 281#ifdef CONFIG_SMP
279 ALT_SMP(mrc p15, 0, r0, c1, c0, 1) 282 ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
280 ALT_UP(mov r0, #(1 << 6)) @ fake it for UP 283 ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
283 orreq r0, r0, r10 @ Enable CPU-specific SMP bits 286 orreq r0, r0, r10 @ Enable CPU-specific SMP bits
284 mcreq p15, 0, r0, c1, c0, 1 287 mcreq p15, 0, r0, c1, c0, 1
285#endif 288#endif
286 b __v7_setup 289 b __v7_setup_cont
287 290
288/* 291/*
289 * Errata: 292 * Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
413 416
414__v7_setup: 417__v7_setup:
415 adr r12, __v7_setup_stack @ the local stack 418 adr r12, __v7_setup_stack @ the local stack
416 stmia r12, {r0-r5, r7, r9, r11, lr} 419 stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
417 bl v7_invalidate_l1 420 bl v7_invalidate_l1
418 ldmia r12, {r0-r5, r7, r9, r11, lr} 421 ldmia r12, {r0-r5, lr}
419 422
423__v7_setup_cont:
420 and r0, r9, #0xff000000 @ ARM? 424 and r0, r9, #0xff000000 @ ARM?
421 teq r0, #0x41000000 425 teq r0, #0x41000000
422 bne __errata_finish 426 bne __errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
480 484
481 .align 2 485 .align 2
482__v7_setup_stack: 486__v7_setup_stack:
483	.space	4 * 11				@ 11 registers	487	.space	4 * 7				@ 7 registers
484 488
485 __INITDATA 489 __INITDATA
486 490
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
74 74
75int bpf_jit_enable __read_mostly; 75int bpf_jit_enable __read_mostly;
76 76
77static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
78 unsigned int size)
79{
80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
81
82 if (!ptr)
83 return -EFAULT;
84 memcpy(ret, ptr, size);
85 return 0;
86}
87
88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
78{ 89{
79 u8 ret; 90 u8 ret;
80 int err; 91 int err;
81 92
82 err = skb_copy_bits(skb, offset, &ret, 1); 93 if (offset < 0)
94 err = call_neg_helper(skb, offset, &ret, 1);
95 else
96 err = skb_copy_bits(skb, offset, &ret, 1);
83 97
84 return (u64)err << 32 | ret; 98 return (u64)err << 32 | ret;
85} 99}
86 100
87static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
88{ 102{
89 u16 ret; 103 u16 ret;
90 int err; 104 int err;
91 105
92 err = skb_copy_bits(skb, offset, &ret, 2); 106 if (offset < 0)
107 err = call_neg_helper(skb, offset, &ret, 2);
108 else
109 err = skb_copy_bits(skb, offset, &ret, 2);
93 110
94 return (u64)err << 32 | ntohs(ret); 111 return (u64)err << 32 | ntohs(ret);
95} 112}
96 113
97static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
98{ 115{
99 u32 ret; 116 u32 ret;
100 int err; 117 int err;
101 118
102 err = skb_copy_bits(skb, offset, &ret, 4); 119 if (offset < 0)
120 err = call_neg_helper(skb, offset, &ret, 4);
121 else
122 err = skb_copy_bits(skb, offset, &ret, 4);
103 123
104 return (u64)err << 32 | ntohl(ret); 124 return (u64)err << 32 | ntohl(ret);
105} 125}
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
536 case BPF_LD | BPF_B | BPF_ABS: 556 case BPF_LD | BPF_B | BPF_ABS:
537 load_order = 0; 557 load_order = 0;
538load: 558load:
539 /* the interpreter will deal with the negative K */
540 if ((int)k < 0)
541 return -ENOTSUPP;
542 emit_mov_i(r_off, k, ctx); 559 emit_mov_i(r_off, k, ctx);
543load_common: 560load_common:
544 ctx->seen |= SEEN_DATA | SEEN_CALL; 561 ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
547 emit(ARM_SUB_I(r_scratch, r_skb_hl, 564 emit(ARM_SUB_I(r_scratch, r_skb_hl,
548 1 << load_order), ctx); 565 1 << load_order), ctx);
549 emit(ARM_CMP_R(r_scratch, r_off), ctx); 566 emit(ARM_CMP_R(r_scratch, r_off), ctx);
550 condt = ARM_COND_HS; 567 condt = ARM_COND_GE;
551 } else { 568 } else {
552 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 569 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
553 condt = ARM_COND_HI; 570 condt = ARM_COND_HI;
554 } 571 }
555 572
573 /*
574 * test for negative offset, only if we are
575 * currently scheduled to take the fast
576 * path. this will update the flags so that
577			 * the slowpath instructions are ignored if the
578 * offset is negative.
579 *
580			 * for load_order == 0 the HI condition will
581 * make loads at offset 0 take the slow path too.
582 */
583 _emit(condt, ARM_CMP_I(r_off, 0), ctx);
584
556 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 585 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
557 ctx); 586 ctx);
558 587
@@ -860,9 +889,11 @@ b_epilogue:
860 off = offsetof(struct sk_buff, vlan_tci); 889 off = offsetof(struct sk_buff, vlan_tci);
861 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 890 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
862 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 891 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
863 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 892 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
864 else 893 else {
865 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 894 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
895 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
896 }
866 break; 897 break;
867 case BPF_ANC | SKF_AD_QUEUE: 898 case BPF_ANC | SKF_AD_QUEUE:
868 ctx->seen |= SEEN_SKB; 899 ctx->seen |= SEEN_SKB;
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
14VDSO_LDFLAGS += -nostdlib -shared 14VDSO_LDFLAGS += -nostdlib -shared
15VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) 15VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
16VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id) 16VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
17VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd) 17VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
18 18
19obj-$(CONFIG_VDSO) += vdso.o 19obj-$(CONFIG_VDSO) += vdso.o
20extra-$(CONFIG_VDSO) += vdso.lds 20extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index 9005b07296c8..aedec81d1198 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,13 +45,11 @@
45 * it does. 45 * it does.
46 */ 46 */
47 47
48#define _GNU_SOURCE
49
50#include <byteswap.h> 48#include <byteswap.h>
51#include <elf.h> 49#include <elf.h>
52#include <errno.h> 50#include <errno.h>
53#include <error.h>
54#include <fcntl.h> 51#include <fcntl.h>
52#include <stdarg.h>
55#include <stdbool.h> 53#include <stdbool.h>
56#include <stdio.h> 54#include <stdio.h>
57#include <stdlib.h> 55#include <stdlib.h>
@@ -82,11 +80,25 @@
82#define EF_ARM_ABI_FLOAT_HARD 0x400 80#define EF_ARM_ABI_FLOAT_HARD 0x400
83#endif 81#endif
84 82
83static int failed;
84static const char *argv0;
85static const char *outfile; 85static const char *outfile;
86 86
87static void fail(const char *fmt, ...)
88{
89 va_list ap;
90
91 failed = 1;
92 fprintf(stderr, "%s: ", argv0);
93 va_start(ap, fmt);
94 vfprintf(stderr, fmt, ap);
95 va_end(ap);
96 exit(EXIT_FAILURE);
97}
98
87static void cleanup(void) 99static void cleanup(void)
88{ 100{
89 if (error_message_count > 0 && outfile != NULL) 101 if (failed && outfile != NULL)
90 unlink(outfile); 102 unlink(outfile);
91} 103}
92 104
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
119 int infd; 131 int infd;
120 132
121 atexit(cleanup); 133 atexit(cleanup);
134 argv0 = argv[0];
122 135
123 if (argc != 3) 136 if (argc != 3)
124 error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]); 137 fail("Usage: %s [infile] [outfile]\n", argv[0]);
125 138
126 infile = argv[1]; 139 infile = argv[1];
127 outfile = argv[2]; 140 outfile = argv[2];
128 141
129 infd = open(infile, O_RDONLY); 142 infd = open(infile, O_RDONLY);
130 if (infd < 0) 143 if (infd < 0)
131 error(EXIT_FAILURE, errno, "Cannot open %s", infile); 144 fail("Cannot open %s: %s\n", infile, strerror(errno));
132 145
133 if (fstat(infd, &stat) != 0) 146 if (fstat(infd, &stat) != 0)
134 error(EXIT_FAILURE, errno, "Failed stat for %s", infile); 147 fail("Failed stat for %s: %s\n", infile, strerror(errno));
135 148
136 inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0); 149 inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
137 if (inbuf == MAP_FAILED) 150 if (inbuf == MAP_FAILED)
138 error(EXIT_FAILURE, errno, "Failed to map %s", infile); 151 fail("Failed to map %s: %s\n", infile, strerror(errno));
139 152
140 close(infd); 153 close(infd);
141 154
142 inhdr = inbuf; 155 inhdr = inbuf;
143 156
144 if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0) 157 if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
145 error(EXIT_FAILURE, 0, "Not an ELF file"); 158 fail("Not an ELF file\n");
146 159
147 if (inhdr->e_ident[EI_CLASS] != ELFCLASS32) 160 if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
148 error(EXIT_FAILURE, 0, "Unsupported ELF class"); 161 fail("Unsupported ELF class\n");
149 162
150 swap = inhdr->e_ident[EI_DATA] != HOST_ORDER; 163 swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
151 164
152 if (read_elf_half(inhdr->e_type, swap) != ET_DYN) 165 if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
153 error(EXIT_FAILURE, 0, "Not a shared object"); 166 fail("Not a shared object\n");
154 167
155 if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) { 168 if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
156 error(EXIT_FAILURE, 0, "Unsupported architecture %#x", 169 fail("Unsupported architecture %#x\n", inhdr->e_machine);
157 inhdr->e_machine);
158 }
159 170
160 e_flags = read_elf_word(inhdr->e_flags, swap); 171 e_flags = read_elf_word(inhdr->e_flags, swap);
161 172
162 if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) { 173 if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
163 error(EXIT_FAILURE, 0, "Unsupported EABI version %#x", 174 fail("Unsupported EABI version %#x\n",
164 EF_ARM_EABI_VERSION(e_flags)); 175 EF_ARM_EABI_VERSION(e_flags));
165 } 176 }
166 177
167 if (e_flags & EF_ARM_ABI_FLOAT_HARD) 178 if (e_flags & EF_ARM_ABI_FLOAT_HARD)
168 error(EXIT_FAILURE, 0, 179 fail("Unexpected hard-float flag set in e_flags\n");
169 "Unexpected hard-float flag set in e_flags");
170 180
171 clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT); 181 clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
172 182
173 outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); 183 outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
174 if (outfd < 0) 184 if (outfd < 0)
175 error(EXIT_FAILURE, errno, "Cannot open %s", outfile); 185 fail("Cannot open %s: %s\n", outfile, strerror(errno));
176 186
177 if (ftruncate(outfd, stat.st_size) != 0) 187 if (ftruncate(outfd, stat.st_size) != 0)
178 error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile); 188 fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
179 189
180 outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, 190 outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
181 outfd, 0); 191 outfd, 0);
182 if (outbuf == MAP_FAILED) 192 if (outbuf == MAP_FAILED)
183 error(EXIT_FAILURE, errno, "Failed to map %s", outfile); 193 fail("Failed to map %s: %s\n", outfile, strerror(errno));
184 194
185 close(outfd); 195 close(outfd);
186 196
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
195 } 205 }
196 206
197 if (msync(outbuf, stat.st_size, MS_SYNC) != 0) 207 if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
198 error(EXIT_FAILURE, errno, "Failed to sync %s", outfile); 208 fail("Failed to sync %s: %s\n", outfile, strerror(errno));
199 209
200 return EXIT_SUCCESS; 210 return EXIT_SUCCESS;
201} 211}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0f6edb14b7e4..318175f62c24 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,9 +23,9 @@ config ARM64
23 select BUILDTIME_EXTABLE_SORT 23 select BUILDTIME_EXTABLE_SORT
24 select CLONE_BACKWARDS 24 select CLONE_BACKWARDS
25 select COMMON_CLK 25 select COMMON_CLK
26 select EDAC_SUPPORT
27 select CPU_PM if (SUSPEND || CPU_IDLE) 26 select CPU_PM if (SUSPEND || CPU_IDLE)
28 select DCACHE_WORD_ACCESS 27 select DCACHE_WORD_ACCESS
28 select EDAC_SUPPORT
29 select GENERIC_ALLOCATOR 29 select GENERIC_ALLOCATOR
30 select GENERIC_CLOCKEVENTS 30 select GENERIC_CLOCKEVENTS
31 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 31 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 83578e766b94..4c55833d8a41 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -23,6 +23,16 @@
23 device_type = "memory"; 23 device_type = "memory";
24 reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */ 24 reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
25 }; 25 };
26
27 gpio-keys {
28 compatible = "gpio-keys";
29 button@1 {
30 label = "POWER";
31 linux,code = <116>;
32 linux,input-type = <0x1>;
33 interrupts = <0x0 0x2d 0x1>;
34 };
35 };
26}; 36};
27 37
28&pcie0clk { 38&pcie0clk {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
823 device_type = "dma"; 823 device_type = "dma";
824 reg = <0x0 0x1f270000 0x0 0x10000>, 824 reg = <0x0 0x1f270000 0x0 0x10000>,
825 <0x0 0x1f200000 0x0 0x10000>, 825 <0x0 0x1f200000 0x0 0x10000>,
826 <0x0 0x1b008000 0x0 0x2000>, 826 <0x0 0x1b000000 0x0 0x400000>,
827 <0x0 0x1054a000 0x0 0x100>; 827 <0x0 0x1054a000 0x0 0x100>;
828 interrupts = <0x0 0x82 0x4>, 828 interrupts = <0x0 0x82 0x4>,
829 <0x0 0xb8 0x4>, 829 <0x0 0xb8 0x4>,
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index c5c98b91514e..bb3c07209676 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -1,6 +1,7 @@
1dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb 1dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb
2dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb 2dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb
3dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb 3dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
4dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
4 5
5always := $(dtb-y) 6always := $(dtb-y)
6subdir-y := $(dts-dirs) 7subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
new file mode 100644
index 000000000000..5b1d0181023b
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -0,0 +1,191 @@
1/*
2 * ARM Ltd. Versatile Express
3 *
4 * LogicTile Express 20MG
5 * V2F-1XV7
6 *
7 * Cortex-A53 (2 cores) Soft Macrocell Model
8 *
9 * HBI-0247C
10 */
11
12/dts-v1/;
13
14#include <dt-bindings/interrupt-controller/arm-gic.h>
15
16/ {
17 model = "V2F-1XV7 Cortex-A53x2 SMM";
18 arm,hbi = <0x247>;
19 arm,vexpress,site = <0xf>;
20 compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress";
21 interrupt-parent = <&gic>;
22 #address-cells = <2>;
23 #size-cells = <2>;
24
25 chosen {
26 stdout-path = "serial0:38400n8";
27 };
28
29 aliases {
30 serial0 = &v2m_serial0;
31 serial1 = &v2m_serial1;
32 serial2 = &v2m_serial2;
33 serial3 = &v2m_serial3;
34 i2c0 = &v2m_i2c_dvi;
35 i2c1 = &v2m_i2c_pcie;
36 };
37
38 cpus {
39 #address-cells = <2>;
40 #size-cells = <0>;
41
42 cpu@0 {
43 device_type = "cpu";
44 compatible = "arm,cortex-a53", "arm,armv8";
45 reg = <0 0>;
46 next-level-cache = <&L2_0>;
47 };
48
49 cpu@1 {
50 device_type = "cpu";
51 compatible = "arm,cortex-a53", "arm,armv8";
52 reg = <0 1>;
53 next-level-cache = <&L2_0>;
54 };
55
56 L2_0: l2-cache0 {
57 compatible = "cache";
58 };
59 };
60
61 memory@80000000 {
62 device_type = "memory";
63 reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */
64 };
65
66 gic: interrupt-controller@2c001000 {
67 compatible = "arm,gic-400";
68 #interrupt-cells = <3>;
69 #address-cells = <0>;
70 interrupt-controller;
71 reg = <0 0x2c001000 0 0x1000>,
72 <0 0x2c002000 0 0x2000>,
73 <0 0x2c004000 0 0x2000>,
74 <0 0x2c006000 0 0x2000>;
75 interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
76 };
77
78 timer {
79 compatible = "arm,armv8-timer";
80 interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
81 <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
82 <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
83 <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
84 };
85
86 pmu {
87 compatible = "arm,armv8-pmuv3";
88 interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
89 <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
90 };
91
92 dcc {
93 compatible = "arm,vexpress,config-bus";
94 arm,vexpress,config-bridge = <&v2m_sysreg>;
95
96 smbclk: osc@4 {
97 /* SMC clock */
98 compatible = "arm,vexpress-osc";
99 arm,vexpress-sysreg,func = <1 4>;
100 freq-range = <40000000 40000000>;
101 #clock-cells = <0>;
102 clock-output-names = "smclk";
103 };
104
105 volt@0 {
106 /* VIO to expansion board above */
107 compatible = "arm,vexpress-volt";
108 arm,vexpress-sysreg,func = <2 0>;
109 regulator-name = "VIO_UP";
110 regulator-min-microvolt = <800000>;
111 regulator-max-microvolt = <1800000>;
112 regulator-always-on;
113 };
114
115 volt@1 {
116 /* 12V from power connector J6 */
117 compatible = "arm,vexpress-volt";
118 arm,vexpress-sysreg,func = <2 1>;
119 regulator-name = "12";
120 regulator-always-on;
121 };
122
123 temp@0 {
124 /* FPGA temperature */
125 compatible = "arm,vexpress-temp";
126 arm,vexpress-sysreg,func = <4 0>;
127 label = "FPGA";
128 };
129 };
130
131 smb {
132 compatible = "simple-bus";
133
134 #address-cells = <2>;
135 #size-cells = <1>;
136 ranges = <0 0 0 0x08000000 0x04000000>,
137 <1 0 0 0x14000000 0x04000000>,
138 <2 0 0 0x18000000 0x04000000>,
139 <3 0 0 0x1c000000 0x04000000>,
140 <4 0 0 0x0c000000 0x04000000>,
141 <5 0 0 0x10000000 0x04000000>;
142
143 #interrupt-cells = <1>;
144 interrupt-map-mask = <0 0 63>;
145 interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
146 <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
147 <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
148 <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
149 <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
150 <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
151 <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
152 <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
153 <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
154 <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
155 <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
156 <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
157 <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
158 <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
159 <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
160 <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
161 <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
162 <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
163 <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
164 <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
165 <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
166 <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
167 <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
168 <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
169 <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
170 <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
171 <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
172 <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
173 <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
174 <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
175 <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
176 <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
177 <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
178 <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
179 <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
180 <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
181 <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
182 <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
183 <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
184 <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
185 <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
186 <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
187 <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
188
189 /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi"
190 };
191};
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index d8c0bdc51882..9cb7cf94284a 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -376,10 +376,19 @@
376 gic0: interrupt-controller@8010,00000000 { 376 gic0: interrupt-controller@8010,00000000 {
377 compatible = "arm,gic-v3"; 377 compatible = "arm,gic-v3";
378 #interrupt-cells = <3>; 378 #interrupt-cells = <3>;
379 #address-cells = <2>;
380 #size-cells = <2>;
381 ranges;
379 interrupt-controller; 382 interrupt-controller;
380 reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */ 383 reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */
381 <0x8010 0x80000000 0x0 0x600000>; /* GICR */ 384 <0x8010 0x80000000 0x0 0x600000>; /* GICR */
382 interrupts = <1 9 0xf04>; 385 interrupts = <1 9 0xf04>;
386
387 its: gic-its@8010,00020000 {
388 compatible = "arm,gic-v3-its";
389 msi-controller;
390 reg = <0x8010 0x20000 0x0 0x200000>;
391 };
383 }; 392 };
384 393
385 uaa0: serial@87e0,24000000 { 394 uaa0: serial@87e0,24000000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f38c94f1d898..4e17e7ede33d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -83,6 +83,7 @@ CONFIG_BLK_DEV_SD=y
83CONFIG_ATA=y 83CONFIG_ATA=y
84CONFIG_SATA_AHCI=y 84CONFIG_SATA_AHCI=y
85CONFIG_SATA_AHCI_PLATFORM=y 85CONFIG_SATA_AHCI_PLATFORM=y
86CONFIG_AHCI_CEVA=y
86CONFIG_AHCI_XGENE=y 87CONFIG_AHCI_XGENE=y
87CONFIG_PATA_PLATFORM=y 88CONFIG_PATA_PLATFORM=y
88CONFIG_PATA_OF_PLATFORM=y 89CONFIG_PATA_OF_PLATFORM=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index b112a39834d0..70fd9ffb58cf 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
25generic-y += local.h 25generic-y += local.h
26generic-y += local64.h 26generic-y += local64.h
27generic-y += mcs_spinlock.h 27generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h
28generic-y += mman.h 29generic-y += mman.h
29generic-y += msgbuf.h 30generic-y += msgbuf.h
30generic-y += msi.h 31generic-y += msi.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 39248d3adf5d..406485ed110a 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -19,6 +19,14 @@
19#include <asm/psci.h> 19#include <asm/psci.h>
20#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
21 21
22/* Macros for consistency checks of the GICC subtable of MADT */
23#define ACPI_MADT_GICC_LENGTH \
24 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
25
26#define BAD_MADT_GICC_ENTRY(entry, end) \
27 (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
28 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
29
22/* Basic configuration for ACPI */ 30/* Basic configuration for ACPI */
23#ifdef CONFIG_ACPI 31#ifdef CONFIG_ACPI
24/* ACPI table mapping after acpi_gbl_permanent_mmap is set */ 32/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 562b655f5ba9..000000000000
--- a/arch/arm64/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
13#define _ASM_ARM64_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9d4aa18f2a82..e8ca6eaedd02 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
122 122
123 /* Show what we know for posterity */ 123 /* Show what we know for posterity */
124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), 124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
125 sizeof(vendor)); 125 sizeof(vendor) * sizeof(efi_char16_t));
126 if (c16) { 126 if (c16) {
127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
128 vendor[i] = c16[i]; 128 vendor[i] = c16[i];
129 vendor[i] = '\0'; 129 vendor[i] = '\0';
130 early_memunmap(c16, sizeof(vendor)); 130 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
131 } 131 }
132 132
133 pr_info("EFI v%u.%.02u by %s\n", 133 pr_info("EFI v%u.%.02u by %s\n",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -352,8 +352,8 @@ el1_inv:
352 // TODO: add support for undefined instructions in kernel mode 352 // TODO: add support for undefined instructions in kernel mode
353 enable_dbg 353 enable_dbg
354 mov x0, sp 354 mov x0, sp
355 mov x2, x1
355 mov x1, #BAD_SYNC 356 mov x1, #BAD_SYNC
356 mrs x2, esr_el1
357 b bad_mode 357 b bad_mode
358ENDPROC(el1_sync) 358ENDPROC(el1_sync)
359 359
@@ -553,7 +553,7 @@ el0_inv:
553 ct_user_exit 553 ct_user_exit
554 mov x0, sp 554 mov x0, sp
555 mov x1, #BAD_SYNC 555 mov x1, #BAD_SYNC
556 mrs x2, esr_el1 556 mov x2, x25
557 bl bad_mode 557 bl bad_mode
558 b ret_to_user 558 b ret_to_user
559ENDPROC(el0_sync) 559ENDPROC(el0_sync)
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
585 * 585 *
586 */ 586 */
587ENTRY(cpu_switch_to) 587ENTRY(cpu_switch_to)
588 add x8, x0, #THREAD_CPU_CONTEXT 588 mov x10, #THREAD_CPU_CONTEXT
589 add x8, x0, x10
589 mov x9, sp 590 mov x9, sp
590 stp x19, x20, [x8], #16 // store callee-saved registers 591 stp x19, x20, [x8], #16 // store callee-saved registers
591 stp x21, x22, [x8], #16 592 stp x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
594 stp x27, x28, [x8], #16 595 stp x27, x28, [x8], #16
595 stp x29, x9, [x8], #16 596 stp x29, x9, [x8], #16
596 str lr, [x8] 597 str lr, [x8]
597 add x8, x1, #THREAD_CPU_CONTEXT 598 add x8, x1, x10
598 ldp x19, x20, [x8], #16 // restore callee-saved registers 599 ldp x19, x20, [x8], #16 // restore callee-saved registers
599 ldp x21, x22, [x8], #16 600 ldp x21, x22, [x8], #16
600 ldp x23, x24, [x8], #16 601 ldp x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index bd9bfaa9269b..f332d5d1f6b4 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -32,13 +32,11 @@
32 32
33ENTRY(compat_sys_sigreturn_wrapper) 33ENTRY(compat_sys_sigreturn_wrapper)
34 mov x0, sp 34 mov x0, sp
35 mov x27, #0 // prevent syscall restart handling (why)
36 b compat_sys_sigreturn 35 b compat_sys_sigreturn
37ENDPROC(compat_sys_sigreturn_wrapper) 36ENDPROC(compat_sys_sigreturn_wrapper)
38 37
39ENTRY(compat_sys_rt_sigreturn_wrapper) 38ENTRY(compat_sys_rt_sigreturn_wrapper)
40 mov x0, sp 39 mov x0, sp
41 mov x27, #0 // prevent syscall restart handling (why)
42 b compat_sys_rt_sigreturn 40 b compat_sys_rt_sigreturn
43ENDPROC(compat_sys_rt_sigreturn_wrapper) 41ENDPROC(compat_sys_rt_sigreturn_wrapper)
44 42
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
61static bool migrate_one_irq(struct irq_desc *desc) 61static bool migrate_one_irq(struct irq_desc *desc)
62{ 62{
63 struct irq_data *d = irq_desc_get_irq_data(desc); 63 struct irq_data *d = irq_desc_get_irq_data(desc);
64 const struct cpumask *affinity = d->affinity; 64 const struct cpumask *affinity = irq_data_get_affinity_mask(d);
65 struct irq_chip *c; 65 struct irq_chip *c;
66 bool ret = false; 66 bool ret = false;
67 67
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
81 if (!c->irq_set_affinity) 81 if (!c->irq_set_affinity)
82 pr_debug("IRQ%u: unable to set affinity\n", d->irq); 82 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) 83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
84 cpumask_copy(d->affinity, affinity); 84 cpumask_copy(irq_data_get_affinity_mask(d), affinity);
85 85
86 return ret; 86 return ret;
87} 87}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
168 * Other callers might not initialize the si_lsb field, 168 * Other callers might not initialize the si_lsb field,
169 * so check explicitely for the right codes here. 169 * so check explicitely for the right codes here.
170 */ 170 */
171 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 171 if (from->si_signo == SIGBUS &&
172 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
172 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 173 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
173#endif 174#endif
174 break; 175 break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
201 202
202int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 203int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
203{ 204{
204 memset(to, 0, sizeof *to);
205
206 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || 205 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
207 copy_from_user(to->_sifields._pad, 206 copy_from_user(to->_sifields._pad,
208 from->_sifields._pad, SI_PAD_SIZE)) 207 from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 695801a54ca5..50fb4696654e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -438,7 +438,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
438 struct acpi_madt_generic_interrupt *processor; 438 struct acpi_madt_generic_interrupt *processor;
439 439
440 processor = (struct acpi_madt_generic_interrupt *)header; 440 processor = (struct acpi_madt_generic_interrupt *)header;
441 if (BAD_MADT_ENTRY(processor, end)) 441 if (BAD_MADT_GICC_ENTRY(processor, end))
442 return -EINVAL; 442 return -EINVAL;
443 443
444 acpi_table_print_madt_entry(header); 444 acpi_table_print_madt_entry(header);
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
199 */ 199 */
200void update_vsyscall(struct timekeeper *tk) 200void update_vsyscall(struct timekeeper *tk)
201{ 201{
202 struct timespec xtime_coarse;
203 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); 202 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
204 203
205 ++vdso_data->tb_seq_count; 204 ++vdso_data->tb_seq_count;
206 smp_wmb(); 205 smp_wmb();
207 206
208 xtime_coarse = __current_kernel_time();
209 vdso_data->use_syscall = use_syscall; 207 vdso_data->use_syscall = use_syscall;
210 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 208 vdso_data->xtime_coarse_sec = tk->xtime_sec;
211 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 209 vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
210 tk->tkr_mono.shift;
212 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; 211 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
213 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; 212 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
214 213
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 9d84feb41a16..773d37a14039 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,5 +4,3 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
4 context.o proc.o pageattr.o 4 context.o proc.o pageattr.o
5obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 5obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
6obj-$(CONFIG_ARM64_PTDUMP) += dump.o 6obj-$(CONFIG_ARM64_PTDUMP) += dump.o
7
8CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 1d66afdfac07..f61f2dd67464 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += irq_work.h
12generic-y += local.h 12generic-y += local.h
13generic-y += local64.h 13generic-y += local64.h
14generic-y += mcs_spinlock.h 14generic-y += mcs_spinlock.h
15generic-y += mm-arch-hooks.h
15generic-y += param.h 16generic-y += param.h
16generic-y += percpu.h 17generic-y += percpu.h
17generic-y += preempt.h 18generic-y += preempt.h
diff --git a/arch/avr32/include/asm/mm-arch-hooks.h b/arch/avr32/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 145452ffbdad..000000000000
--- a/arch/avr32/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_AVR32_MM_ARCH_HOOKS_H
13#define _ASM_AVR32_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_AVR32_MM_ARCH_HOOKS_H */
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
18 18
19#include <mach/pm.h> 19#include <mach/pm.h>
20 20
21static bool disable_cpu_idle_poll;
21 22
22static cycle_t read_cycle_count(struct clocksource *cs) 23static cycle_t read_cycle_count(struct clocksource *cs)
23{ 24{
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
80 return 0; 81 return 0;
81} 82}
82 83
83static void comparator_mode(enum clock_event_mode mode, 84static int comparator_shutdown(struct clock_event_device *evdev)
84 struct clock_event_device *evdev)
85{ 85{
86 switch (mode) { 86 pr_debug("%s: %s\n", __func__, evdev->name);
87 case CLOCK_EVT_MODE_ONESHOT: 87 sysreg_write(COMPARE, 0);
88 pr_debug("%s: start\n", evdev->name); 88
89 /* FALLTHROUGH */ 89 if (disable_cpu_idle_poll) {
90 case CLOCK_EVT_MODE_RESUME: 90 disable_cpu_idle_poll = false;
91 /* 91 /*
92 * If we're using the COUNT and COMPARE registers we 92 * Only disable idle poll if we have forced that
93 * need to force idle poll. 93 * in a previous call.
94 */ 94 */
95 cpu_idle_poll_ctrl(true); 95 cpu_idle_poll_ctrl(false);
96 break;
97 case CLOCK_EVT_MODE_UNUSED:
98 case CLOCK_EVT_MODE_SHUTDOWN:
99 sysreg_write(COMPARE, 0);
100 pr_debug("%s: stop\n", evdev->name);
101 if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
102 evdev->mode == CLOCK_EVT_MODE_RESUME) {
103 /*
104 * Only disable idle poll if we have forced that
105 * in a previous call.
106 */
107 cpu_idle_poll_ctrl(false);
108 }
109 break;
110 default:
111 BUG();
112 } 96 }
97 return 0;
98}
99
100static int comparator_set_oneshot(struct clock_event_device *evdev)
101{
102 pr_debug("%s: %s\n", __func__, evdev->name);
103
104 disable_cpu_idle_poll = true;
105 /*
106 * If we're using the COUNT and COMPARE registers we
107 * need to force idle poll.
108 */
109 cpu_idle_poll_ctrl(true);
110
111 return 0;
113} 112}
114 113
115static struct clock_event_device comparator = { 114static struct clock_event_device comparator = {
116 .name = "avr32_comparator", 115 .name = "avr32_comparator",
117 .features = CLOCK_EVT_FEAT_ONESHOT, 116 .features = CLOCK_EVT_FEAT_ONESHOT,
118 .shift = 16, 117 .shift = 16,
119 .rating = 50, 118 .rating = 50,
120 .set_next_event = comparator_next_event, 119 .set_next_event = comparator_next_event,
121 .set_mode = comparator_mode, 120 .set_state_shutdown = comparator_shutdown,
121 .set_state_oneshot = comparator_set_oneshot,
122 .tick_resume = comparator_set_oneshot,
122}; 123};
123 124
124void read_persistent_clock(struct timespec *ts) 125void read_persistent_clock(struct timespec *ts)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 23b1a97fae7a..52c179bec0cc 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
80{ 80{
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!clk)
84 return 0;
85
83 spin_lock_irqsave(&clk_lock, flags); 86 spin_lock_irqsave(&clk_lock, flags);
84 __clk_enable(clk); 87 __clk_enable(clk);
85 spin_unlock_irqrestore(&clk_lock, flags); 88 spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
106{ 109{
107 unsigned long flags; 110 unsigned long flags;
108 111
112 if (IS_ERR_OR_NULL(clk))
113 return;
114
109 spin_lock_irqsave(&clk_lock, flags); 115 spin_lock_irqsave(&clk_lock, flags);
110 __clk_disable(clk); 116 __clk_disable(clk);
111 spin_unlock_irqrestore(&clk_lock, flags); 117 spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
117 unsigned long flags; 123 unsigned long flags;
118 unsigned long rate; 124 unsigned long rate;
119 125
126 if (!clk)
127 return 0;
128
120 spin_lock_irqsave(&clk_lock, flags); 129 spin_lock_irqsave(&clk_lock, flags);
121 rate = clk->get_rate(clk); 130 rate = clk->get_rate(clk);
122 spin_unlock_irqrestore(&clk_lock, flags); 131 spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
129{ 138{
130 unsigned long flags, actual_rate; 139 unsigned long flags, actual_rate;
131 140
141 if (!clk)
142 return 0;
143
132 if (!clk->set_rate) 144 if (!clk->set_rate)
133 return -ENOSYS; 145 return -ENOSYS;
134 146
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
145 unsigned long flags; 157 unsigned long flags;
146 long ret; 158 long ret;
147 159
160 if (!clk)
161 return 0;
162
148 if (!clk->set_rate) 163 if (!clk->set_rate)
149 return -ENOSYS; 164 return -ENOSYS;
150 165
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
161 unsigned long flags; 176 unsigned long flags;
162 int ret; 177 int ret;
163 178
179 if (!clk)
180 return 0;
181
164 if (!clk->set_parent) 182 if (!clk->set_parent)
165 return -ENOSYS; 183 return -ENOSYS;
166 184
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
174 192
175struct clk *clk_get_parent(struct clk *clk) 193struct clk *clk_get_parent(struct clk *clk)
176{ 194{
177 return clk->parent; 195 return !clk ? NULL : clk->parent;
178} 196}
179EXPORT_SYMBOL(clk_get_parent); 197EXPORT_SYMBOL(clk_get_parent);
180 198
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 07051a63415d..61cd1e786a14 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -21,6 +21,7 @@ generic-y += kvm_para.h
21generic-y += local.h 21generic-y += local.h
22generic-y += local64.h 22generic-y += local64.h
23generic-y += mcs_spinlock.h 23generic-y += mcs_spinlock.h
24generic-y += mm-arch-hooks.h
24generic-y += mman.h 25generic-y += mman.h
25generic-y += msgbuf.h 26generic-y += msgbuf.h
26generic-y += mutex.h 27generic-y += mutex.h
diff --git a/arch/blackfin/include/asm/mm-arch-hooks.h b/arch/blackfin/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 1c5211ec338f..000000000000
--- a/arch/blackfin/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_BLACKFIN_MM_ARCH_HOOKS_H
13#define _ASM_BLACKFIN_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_BLACKFIN_MM_ARCH_HOOKS_H */
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 7aeb32272975..f17c4dc6050c 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += kdebug.h
26generic-y += kmap_types.h 26generic-y += kmap_types.h
27generic-y += local.h 27generic-y += local.h
28generic-y += mcs_spinlock.h 28generic-y += mcs_spinlock.h
29generic-y += mm-arch-hooks.h
29generic-y += mman.h 30generic-y += mman.h
30generic-y += mmu.h 31generic-y += mmu.h
31generic-y += mmu_context.h 32generic-y += mmu_context.h
diff --git a/arch/c6x/include/asm/mm-arch-hooks.h b/arch/c6x/include/asm/mm-arch-hooks.h
deleted file mode 100644
index bb3c4a6ce8e9..000000000000
--- a/arch/c6x/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_C6X_MM_ARCH_HOOKS_H
13#define _ASM_C6X_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_C6X_MM_ARCH_HOOKS_H */
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 4dda9bd6b8fb..e989cee77414 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port)
1464 if (port->write_ts_idx == NBR_IN_DESCR) 1464 if (port->write_ts_idx == NBR_IN_DESCR)
1465 port->write_ts_idx = 0; 1465 port->write_ts_idx = 0;
1466 idx = port->write_ts_idx++; 1466 idx = port->write_ts_idx++;
1467 do_posix_clock_monotonic_gettime(&port->timestamp[idx]); 1467 ktime_get_ts(&port->timestamp[idx]);
1468 port->in_buffer_len += port->inbufchunk; 1468 port->in_buffer_len += port->inbufchunk;
1469 } 1469 }
1470 spin_unlock_irqrestore(&port->lock, flags); 1470 spin_unlock_irqrestore(&port->lock, flags);
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d294f6aaff1d..ad2244f35bca 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += linkage.h
18generic-y += local.h 18generic-y += local.h
19generic-y += local64.h 19generic-y += local64.h
20generic-y += mcs_spinlock.h 20generic-y += mcs_spinlock.h
21generic-y += mm-arch-hooks.h
21generic-y += module.h 22generic-y += module.h
22generic-y += percpu.h 23generic-y += percpu.h
23generic-y += preempt.h 24generic-y += preempt.h
diff --git a/arch/cris/include/asm/mm-arch-hooks.h b/arch/cris/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 314f774db2b0..000000000000
--- a/arch/cris/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_CRIS_MM_ARCH_HOOKS_H
13#define _ASM_CRIS_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_CRIS_MM_ARCH_HOOKS_H */
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 30edce31e5c2..8e47b832cc76 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -4,5 +4,6 @@ generic-y += cputime.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += irq_work.h 5generic-y += irq_work.h
6generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
7generic-y += mm-arch-hooks.h
7generic-y += preempt.h 8generic-y += preempt.h
8generic-y += trace_clock.h 9generic-y += trace_clock.h
diff --git a/arch/frv/include/asm/mm-arch-hooks.h b/arch/frv/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 51d13a870404..000000000000
--- a/arch/frv/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_FRV_MM_ARCH_HOOKS_H
13#define _ASM_FRV_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_FRV_MM_ARCH_HOOKS_H */
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 00379d64f707..70e6ae1e7006 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -33,6 +33,7 @@ generic-y += linkage.h
33generic-y += local.h 33generic-y += local.h
34generic-y += local64.h 34generic-y += local64.h
35generic-y += mcs_spinlock.h 35generic-y += mcs_spinlock.h
36generic-y += mm-arch-hooks.h
36generic-y += mman.h 37generic-y += mman.h
37generic-y += mmu.h 38generic-y += mmu.h
38generic-y += mmu_context.h 39generic-y += mmu_context.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 5ade4a163558..daee37bd0999 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += kmap_types.h
28generic-y += local.h 28generic-y += local.h
29generic-y += local64.h 29generic-y += local64.h
30generic-y += mcs_spinlock.h 30generic-y += mcs_spinlock.h
31generic-y += mm-arch-hooks.h
31generic-y += mman.h 32generic-y += mman.h
32generic-y += msgbuf.h 33generic-y += msgbuf.h
33generic-y += pci.h 34generic-y += pci.h
diff --git a/arch/hexagon/include/asm/mm-arch-hooks.h b/arch/hexagon/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 05e8b939e416..000000000000
--- a/arch/hexagon/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_HEXAGON_MM_ARCH_HOOKS_H
13#define _ASM_HEXAGON_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_HEXAGON_MM_ARCH_HOOKS_H */
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index ccff13d33fa2..9de3ba12f6b9 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += exec.h
4generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += kvm_para.h 5generic-y += kvm_para.h
6generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
7generic-y += mm-arch-hooks.h
7generic-y += preempt.h 8generic-y += preempt.h
8generic-y += trace_clock.h 9generic-y += trace_clock.h
9generic-y += vtime.h 10generic-y += vtime.h
diff --git a/arch/ia64/include/asm/mm-arch-hooks.h b/arch/ia64/include/asm/mm-arch-hooks.h
deleted file mode 100644
index ab4b5c698322..000000000000
--- a/arch/ia64/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_IA64_MM_ARCH_HOOKS_H
13#define _ASM_IA64_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_IA64_MM_ARCH_HOOKS_H */
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index ba1cdc018731..e0eb704ca1fa 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += cputime.h
4generic-y += exec.h 4generic-y += exec.h
5generic-y += irq_work.h 5generic-y += irq_work.h
6generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
7generic-y += mm-arch-hooks.h
7generic-y += module.h 8generic-y += module.h
8generic-y += preempt.h 9generic-y += preempt.h
9generic-y += sections.h 10generic-y += sections.h
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
174#define iowrite16 writew 174#define iowrite16 writew
175#define iowrite32 writel 175#define iowrite32 writel
176 176
177#define ioread16be(addr) be16_to_cpu(readw(addr))
178#define ioread32be(addr) be32_to_cpu(readl(addr))
179#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
180#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
181
177#define mmiowb() 182#define mmiowb()
178 183
179#define flush_write_buffers() do { } while (0) /* M32R_FIXME */ 184#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
diff --git a/arch/m32r/include/asm/mm-arch-hooks.h b/arch/m32r/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 6d60b4750f41..000000000000
--- a/arch/m32r/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_M32R_MM_ARCH_HOOKS_H
13#define _ASM_M32R_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_M32R_MM_ARCH_HOOKS_H */
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 33013dfcd3e1..c496d48a8c8d 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -125,6 +125,13 @@ endif # M68KCLASSIC
125 125
126if COLDFIRE 126if COLDFIRE
127 127
128choice
129 prompt "ColdFire SoC type"
130 default M520x
131 help
132 Select the type of ColdFire System-on-Chip (SoC) that you want
133 to build for.
134
128config M5206 135config M5206
129 bool "MCF5206" 136 bool "MCF5206"
130 depends on !MMU 137 depends on !MMU
@@ -174,9 +181,6 @@ config M525x
174 help 181 help
175 Freescale (Motorola) Coldfire 5251/5253 processor support. 182 Freescale (Motorola) Coldfire 5251/5253 processor support.
176 183
177config M527x
178 bool
179
180config M5271 184config M5271
181 bool "MCF5271" 185 bool "MCF5271"
182 depends on !MMU 186 depends on !MMU
@@ -223,9 +227,6 @@ config M5307
223 help 227 help
224 Motorola ColdFire 5307 processor support. 228 Motorola ColdFire 5307 processor support.
225 229
226config M53xx
227 bool
228
229config M532x 230config M532x
230 bool "MCF532x" 231 bool "MCF532x"
231 depends on !MMU 232 depends on !MMU
@@ -251,9 +252,6 @@ config M5407
251 help 252 help
252 Motorola ColdFire 5407 processor support. 253 Motorola ColdFire 5407 processor support.
253 254
254config M54xx
255 bool
256
257config M547x 255config M547x
258 bool "MCF547x" 256 bool "MCF547x"
259 select M54xx 257 select M54xx
@@ -280,6 +278,17 @@ config M5441x
280 help 278 help
281 Freescale Coldfire 54410/54415/54416/54417/54418 processor support. 279 Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
282 280
281endchoice
282
283config M527x
284 bool
285
286config M53xx
287 bool
288
289config M54xx
290 bool
291
283endif # COLDFIRE 292endif # COLDFIRE
284 293
285 294
@@ -416,22 +425,18 @@ config HAVE_MBAR
416config HAVE_IPSBAR 425config HAVE_IPSBAR
417 bool 426 bool
418 427
419config CLOCK_SET
420 bool "Enable setting the CPU clock frequency"
421 depends on COLDFIRE
422 default n
423 help
424 On some CPU's you do not need to know what the core CPU clock
425 frequency is. On these you can disable clock setting. On some
426 traditional 68K parts, and on all ColdFire parts you need to set
427 the appropriate CPU clock frequency. On these devices many of the
428 onboard peripherals derive their timing from the master CPU clock
429 frequency.
430
431config CLOCK_FREQ 428config CLOCK_FREQ
432 int "Set the core clock frequency" 429 int "Set the core clock frequency"
430 default "25000000" if M5206
431 default "54000000" if M5206e
432 default "166666666" if M520x
433 default "140000000" if M5249
434 default "150000000" if M527x || M523x
435 default "90000000" if M5307
436 default "50000000" if M5407
437 default "266000000" if M54xx
433 default "66666666" 438 default "66666666"
434 depends on CLOCK_SET 439 depends on COLDFIRE
435 help 440 help
436 Define the CPU clock frequency in use. This is the core clock 441 Define the CPU clock frequency in use. This is the core clock
437 frequency, it may or may not be the same as the external clock 442 frequency, it may or may not be the same as the external clock
diff --git a/arch/m68k/configs/m5208evb_defconfig b/arch/m68k/configs/m5208evb_defconfig
index e7292f460af4..4c7b7938d53a 100644
--- a/arch/m68k/configs/m5208evb_defconfig
+++ b/arch/m68k/configs/m5208evb_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,17 +12,12 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
19CONFIG_M520x=y 15# CONFIG_MMU is not set
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=166666666
22CONFIG_CLOCK_DIV=2
23CONFIG_M5208EVB=y
24# CONFIG_4KSTACKS is not set 16# CONFIG_4KSTACKS is not set
25CONFIG_RAMBASE=0x40000000 17CONFIG_RAMBASE=0x40000000
26CONFIG_RAMSIZE=0x2000000 18CONFIG_RAMSIZE=0x2000000
27CONFIG_VECTORBASE=0x40000000 19CONFIG_VECTORBASE=0x40000000
28CONFIG_KERNELBASE=0x40020000 20CONFIG_KERNELBASE=0x40020000
29CONFIG_RAM16BIT=y
30CONFIG_BINFMT_FLAT=y 21CONFIG_BINFMT_FLAT=y
31CONFIG_NET=y 22CONFIG_NET=y
32CONFIG_PACKET=y 23CONFIG_PACKET=y
@@ -40,24 +31,19 @@ CONFIG_INET=y
40# CONFIG_IPV6 is not set 31# CONFIG_IPV6 is not set
41# CONFIG_FW_LOADER is not set 32# CONFIG_FW_LOADER is not set
42CONFIG_MTD=y 33CONFIG_MTD=y
43CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 34CONFIG_MTD_BLOCK=y
45CONFIG_MTD_RAM=y 35CONFIG_MTD_RAM=y
46CONFIG_MTD_UCLINUX=y 36CONFIG_MTD_UCLINUX=y
47CONFIG_BLK_DEV_RAM=y 37CONFIG_BLK_DEV_RAM=y
48# CONFIG_MISC_DEVICES is not set
49CONFIG_NETDEVICES=y 38CONFIG_NETDEVICES=y
50CONFIG_NET_ETHERNET=y
51CONFIG_FEC=y 39CONFIG_FEC=y
52# CONFIG_NETDEV_1000 is not set
53# CONFIG_NETDEV_10000 is not set
54# CONFIG_INPUT is not set 40# CONFIG_INPUT is not set
55# CONFIG_SERIO is not set 41# CONFIG_SERIO is not set
56# CONFIG_VT is not set 42# CONFIG_VT is not set
43# CONFIG_UNIX98_PTYS is not set
57CONFIG_SERIAL_MCF=y 44CONFIG_SERIAL_MCF=y
58CONFIG_SERIAL_MCF_BAUDRATE=115200 45CONFIG_SERIAL_MCF_BAUDRATE=115200
59CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
60# CONFIG_UNIX98_PTYS is not set
61# CONFIG_HW_RANDOM is not set 47# CONFIG_HW_RANDOM is not set
62# CONFIG_HWMON is not set 48# CONFIG_HWMON is not set
63# CONFIG_USB_SUPPORT is not set 49# CONFIG_USB_SUPPORT is not set
@@ -68,8 +54,6 @@ CONFIG_EXT2_FS=y
68CONFIG_ROMFS_FS=y 54CONFIG_ROMFS_FS=y
69CONFIG_ROMFS_BACKED_BY_MTD=y 55CONFIG_ROMFS_BACKED_BY_MTD=y
70# CONFIG_NETWORK_FILESYSTEMS is not set 56# CONFIG_NETWORK_FILESYSTEMS is not set
71# CONFIG_RCU_CPU_STALL_DETECTOR is not set
72CONFIG_SYSCTL_SYSCALL_CHECK=y
73CONFIG_FULLDEBUG=y
74CONFIG_BOOTPARAM=y 57CONFIG_BOOTPARAM=y
75CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 58CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
59CONFIG_FULLDEBUG=y
diff --git a/arch/m68k/configs/m5249evb_defconfig b/arch/m68k/configs/m5249evb_defconfig
index 0cd4b39f325b..a782f368650f 100644
--- a/arch/m68k/configs/m5249evb_defconfig
+++ b/arch/m68k/configs/m5249evb_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5249=y 16CONFIG_M5249=y
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=140000000
22CONFIG_CLOCK_DIV=2
23CONFIG_M5249C3=y 17CONFIG_M5249C3=y
24CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
25CONFIG_RAMSIZE=0x00800000 19CONFIG_RAMSIZE=0x00800000
@@ -38,23 +32,18 @@ CONFIG_INET=y
38# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
39# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
40CONFIG_MTD=y 34CONFIG_MTD=y
41CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
43CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
44CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
45CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
46# CONFIG_MISC_DEVICES is not set
47CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
48CONFIG_NET_ETHERNET=y
49# CONFIG_NETDEV_1000 is not set
50# CONFIG_NETDEV_10000 is not set
51CONFIG_PPP=y 40CONFIG_PPP=y
52# CONFIG_INPUT is not set 41# CONFIG_INPUT is not set
53# CONFIG_SERIO is not set 42# CONFIG_SERIO is not set
54# CONFIG_VT is not set 43# CONFIG_VT is not set
44# CONFIG_UNIX98_PTYS is not set
55CONFIG_SERIAL_MCF=y 45CONFIG_SERIAL_MCF=y
56CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
57# CONFIG_UNIX98_PTYS is not set
58# CONFIG_HWMON is not set 47# CONFIG_HWMON is not set
59# CONFIG_USB_SUPPORT is not set 48# CONFIG_USB_SUPPORT is not set
60CONFIG_EXT2_FS=y 49CONFIG_EXT2_FS=y
@@ -62,7 +51,5 @@ CONFIG_EXT2_FS=y
62CONFIG_ROMFS_FS=y 51CONFIG_ROMFS_FS=y
63CONFIG_ROMFS_BACKED_BY_MTD=y 52CONFIG_ROMFS_BACKED_BY_MTD=y
64# CONFIG_NETWORK_FILESYSTEMS is not set 53# CONFIG_NETWORK_FILESYSTEMS is not set
65# CONFIG_RCU_CPU_STALL_DETECTOR is not set
66CONFIG_BOOTPARAM=y 54CONFIG_BOOTPARAM=y
67CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 55CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
68# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5272c3_defconfig b/arch/m68k/configs/m5272c3_defconfig
index a60cb3509135..6f5fb92f5cbf 100644
--- a/arch/m68k/configs/m5272c3_defconfig
+++ b/arch/m68k/configs/m5272c3_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,8 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5272=y 16CONFIG_M5272=y
20CONFIG_CLOCK_SET=y
21CONFIG_M5272C3=y 17CONFIG_M5272C3=y
22CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
23CONFIG_RAMSIZE=0x00800000 19CONFIG_RAMSIZE=0x00800000
@@ -36,23 +32,18 @@ CONFIG_INET=y
36# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
37# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
38CONFIG_MTD=y 34CONFIG_MTD=y
39CONFIG_MTD_CHAR=y
40CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
41CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
42CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
43CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
44# CONFIG_MISC_DEVICES is not set
45CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
46CONFIG_NET_ETHERNET=y
47CONFIG_FEC=y 40CONFIG_FEC=y
48# CONFIG_NETDEV_1000 is not set
49# CONFIG_NETDEV_10000 is not set
50# CONFIG_INPUT is not set 41# CONFIG_INPUT is not set
51# CONFIG_SERIO is not set 42# CONFIG_SERIO is not set
52# CONFIG_VT is not set 43# CONFIG_VT is not set
44# CONFIG_UNIX98_PTYS is not set
53CONFIG_SERIAL_MCF=y 45CONFIG_SERIAL_MCF=y
54CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
55# CONFIG_UNIX98_PTYS is not set
56# CONFIG_HWMON is not set 47# CONFIG_HWMON is not set
57# CONFIG_USB_SUPPORT is not set 48# CONFIG_USB_SUPPORT is not set
58CONFIG_EXT2_FS=y 49CONFIG_EXT2_FS=y
@@ -61,6 +52,5 @@ CONFIG_EXT2_FS=y
61CONFIG_ROMFS_FS=y 52CONFIG_ROMFS_FS=y
62CONFIG_ROMFS_BACKED_BY_MTD=y 53CONFIG_ROMFS_BACKED_BY_MTD=y
63# CONFIG_NETWORK_FILESYSTEMS is not set 54# CONFIG_NETWORK_FILESYSTEMS is not set
64# CONFIG_RCU_CPU_STALL_DETECTOR is not set
65CONFIG_BOOTPARAM=y 55CONFIG_BOOTPARAM=y
66CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 56CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig
index e6502ab7cb2f..b5d7cd1ce856 100644
--- a/arch/m68k/configs/m5275evb_defconfig
+++ b/arch/m68k/configs/m5275evb_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,11 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5275=y 16CONFIG_M5275=y
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=150000000
22CONFIG_CLOCK_DIV=2
23CONFIG_M5275EVB=y
24# CONFIG_4KSTACKS is not set 17# CONFIG_4KSTACKS is not set
25CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
26CONFIG_RAMSIZE=0x00000000 19CONFIG_RAMSIZE=0x00000000
@@ -39,24 +32,19 @@ CONFIG_INET=y
39# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
40# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
41CONFIG_MTD=y 34CONFIG_MTD=y
42CONFIG_MTD_CHAR=y
43CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
44CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
45CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
46CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
47# CONFIG_MISC_DEVICES is not set
48CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
49CONFIG_NET_ETHERNET=y
50CONFIG_FEC=y 40CONFIG_FEC=y
51# CONFIG_NETDEV_1000 is not set
52# CONFIG_NETDEV_10000 is not set
53CONFIG_PPP=y 41CONFIG_PPP=y
54# CONFIG_INPUT is not set 42# CONFIG_INPUT is not set
55# CONFIG_SERIO is not set 43# CONFIG_SERIO is not set
56# CONFIG_VT is not set 44# CONFIG_VT is not set
45# CONFIG_UNIX98_PTYS is not set
57CONFIG_SERIAL_MCF=y 46CONFIG_SERIAL_MCF=y
58CONFIG_SERIAL_MCF_CONSOLE=y 47CONFIG_SERIAL_MCF_CONSOLE=y
59# CONFIG_UNIX98_PTYS is not set
60# CONFIG_HWMON is not set 48# CONFIG_HWMON is not set
61# CONFIG_USB_SUPPORT is not set 49# CONFIG_USB_SUPPORT is not set
62CONFIG_EXT2_FS=y 50CONFIG_EXT2_FS=y
@@ -65,8 +53,5 @@ CONFIG_EXT2_FS=y
65CONFIG_ROMFS_FS=y 53CONFIG_ROMFS_FS=y
66CONFIG_ROMFS_BACKED_BY_MTD=y 54CONFIG_ROMFS_BACKED_BY_MTD=y
67# CONFIG_NETWORK_FILESYSTEMS is not set 55# CONFIG_NETWORK_FILESYSTEMS is not set
68# CONFIG_RCU_CPU_STALL_DETECTOR is not set
69CONFIG_SYSCTL_SYSCALL_CHECK=y
70CONFIG_BOOTPARAM=y 56CONFIG_BOOTPARAM=y
71CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 57CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
72# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5307c3_defconfig b/arch/m68k/configs/m5307c3_defconfig
index 023812abd2e6..1b4c09461c40 100644
--- a/arch/m68k/configs/m5307c3_defconfig
+++ b/arch/m68k/configs/m5307c3_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
16# CONFIG_BLK_DEV_BSG is not set 12# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 13# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 14# CONFIG_IOSCHED_CFQ is not set
15# CONFIG_MMU is not set
19CONFIG_M5307=y 16CONFIG_M5307=y
20CONFIG_CLOCK_SET=y
21CONFIG_CLOCK_FREQ=90000000
22CONFIG_CLOCK_DIV=2
23CONFIG_M5307C3=y 17CONFIG_M5307C3=y
24CONFIG_RAMBASE=0x00000000 18CONFIG_RAMBASE=0x00000000
25CONFIG_RAMSIZE=0x00800000 19CONFIG_RAMSIZE=0x00800000
@@ -38,16 +32,11 @@ CONFIG_INET=y
38# CONFIG_IPV6 is not set 32# CONFIG_IPV6 is not set
39# CONFIG_FW_LOADER is not set 33# CONFIG_FW_LOADER is not set
40CONFIG_MTD=y 34CONFIG_MTD=y
41CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 35CONFIG_MTD_BLOCK=y
43CONFIG_MTD_RAM=y 36CONFIG_MTD_RAM=y
44CONFIG_MTD_UCLINUX=y 37CONFIG_MTD_UCLINUX=y
45CONFIG_BLK_DEV_RAM=y 38CONFIG_BLK_DEV_RAM=y
46# CONFIG_MISC_DEVICES is not set
47CONFIG_NETDEVICES=y 39CONFIG_NETDEVICES=y
48CONFIG_NET_ETHERNET=y
49# CONFIG_NETDEV_1000 is not set
50# CONFIG_NETDEV_10000 is not set
51CONFIG_PPP=y 40CONFIG_PPP=y
52CONFIG_SLIP=y 41CONFIG_SLIP=y
53CONFIG_SLIP_COMPRESSED=y 42CONFIG_SLIP_COMPRESSED=y
@@ -56,21 +45,17 @@ CONFIG_SLIP_COMPRESSED=y
56# CONFIG_INPUT_MOUSE is not set 45# CONFIG_INPUT_MOUSE is not set
57# CONFIG_SERIO is not set 46# CONFIG_SERIO is not set
58# CONFIG_VT is not set 47# CONFIG_VT is not set
48# CONFIG_LEGACY_PTYS is not set
59CONFIG_SERIAL_MCF=y 49CONFIG_SERIAL_MCF=y
60CONFIG_SERIAL_MCF_CONSOLE=y 50CONFIG_SERIAL_MCF_CONSOLE=y
61# CONFIG_LEGACY_PTYS is not set
62# CONFIG_HW_RANDOM is not set 51# CONFIG_HW_RANDOM is not set
63# CONFIG_HWMON is not set 52# CONFIG_HWMON is not set
64# CONFIG_HID_SUPPORT is not set
65# CONFIG_USB_SUPPORT is not set 53# CONFIG_USB_SUPPORT is not set
66CONFIG_EXT2_FS=y 54CONFIG_EXT2_FS=y
67# CONFIG_DNOTIFY is not set 55# CONFIG_DNOTIFY is not set
68CONFIG_ROMFS_FS=y 56CONFIG_ROMFS_FS=y
69CONFIG_ROMFS_BACKED_BY_MTD=y 57CONFIG_ROMFS_BACKED_BY_MTD=y
70# CONFIG_NETWORK_FILESYSTEMS is not set 58# CONFIG_NETWORK_FILESYSTEMS is not set
71# CONFIG_RCU_CPU_STALL_DETECTOR is not set
72CONFIG_SYSCTL_SYSCALL_CHECK=y
73CONFIG_FULLDEBUG=y
74CONFIG_BOOTPARAM=y 59CONFIG_BOOTPARAM=y
75CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 60CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
76# CONFIG_CRC32 is not set 61CONFIG_FULLDEBUG=y
diff --git a/arch/m68k/configs/m5407c3_defconfig b/arch/m68k/configs/m5407c3_defconfig
index 557b39f3be90..275ad543d4bc 100644
--- a/arch/m68k/configs/m5407c3_defconfig
+++ b/arch/m68k/configs/m5407c3_defconfig
@@ -1,10 +1,6 @@
1# CONFIG_MMU is not set
2CONFIG_EXPERIMENTAL=y
3CONFIG_LOG_BUF_SHIFT=14 1CONFIG_LOG_BUF_SHIFT=14
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5CONFIG_EXPERT=y 2CONFIG_EXPERT=y
6# CONFIG_KALLSYMS is not set 3# CONFIG_KALLSYMS is not set
7# CONFIG_HOTPLUG is not set
8# CONFIG_FUTEX is not set 4# CONFIG_FUTEX is not set
9# CONFIG_EPOLL is not set 5# CONFIG_EPOLL is not set
10# CONFIG_SIGNALFD is not set 6# CONFIG_SIGNALFD is not set
@@ -17,9 +13,8 @@ CONFIG_MODULE_UNLOAD=y
17# CONFIG_BLK_DEV_BSG is not set 13# CONFIG_BLK_DEV_BSG is not set
18# CONFIG_IOSCHED_DEADLINE is not set 14# CONFIG_IOSCHED_DEADLINE is not set
19# CONFIG_IOSCHED_CFQ is not set 15# CONFIG_IOSCHED_CFQ is not set
16# CONFIG_MMU is not set
20CONFIG_M5407=y 17CONFIG_M5407=y
21CONFIG_CLOCK_SET=y
22CONFIG_CLOCK_FREQ=50000000
23CONFIG_M5407C3=y 18CONFIG_M5407C3=y
24CONFIG_RAMBASE=0x00000000 19CONFIG_RAMBASE=0x00000000
25CONFIG_RAMSIZE=0x00000000 20CONFIG_RAMSIZE=0x00000000
@@ -38,22 +33,17 @@ CONFIG_INET=y
38# CONFIG_IPV6 is not set 33# CONFIG_IPV6 is not set
39# CONFIG_FW_LOADER is not set 34# CONFIG_FW_LOADER is not set
40CONFIG_MTD=y 35CONFIG_MTD=y
41CONFIG_MTD_CHAR=y
42CONFIG_MTD_BLOCK=y 36CONFIG_MTD_BLOCK=y
43CONFIG_MTD_RAM=y 37CONFIG_MTD_RAM=y
44CONFIG_MTD_UCLINUX=y 38CONFIG_MTD_UCLINUX=y
45CONFIG_BLK_DEV_RAM=y 39CONFIG_BLK_DEV_RAM=y
46# CONFIG_MISC_DEVICES is not set
47CONFIG_NETDEVICES=y 40CONFIG_NETDEVICES=y
48CONFIG_NET_ETHERNET=y
49# CONFIG_NETDEV_1000 is not set
50# CONFIG_NETDEV_10000 is not set
51CONFIG_PPP=y 41CONFIG_PPP=y
52# CONFIG_INPUT is not set 42# CONFIG_INPUT is not set
53# CONFIG_VT is not set 43# CONFIG_VT is not set
44# CONFIG_UNIX98_PTYS is not set
54CONFIG_SERIAL_MCF=y 45CONFIG_SERIAL_MCF=y
55CONFIG_SERIAL_MCF_CONSOLE=y 46CONFIG_SERIAL_MCF_CONSOLE=y
56# CONFIG_UNIX98_PTYS is not set
57# CONFIG_HW_RANDOM is not set 47# CONFIG_HW_RANDOM is not set
58# CONFIG_HWMON is not set 48# CONFIG_HWMON is not set
59# CONFIG_USB_SUPPORT is not set 49# CONFIG_USB_SUPPORT is not set
@@ -63,8 +53,5 @@ CONFIG_EXT2_FS=y
63CONFIG_ROMFS_FS=y 53CONFIG_ROMFS_FS=y
64CONFIG_ROMFS_BACKED_BY_MTD=y 54CONFIG_ROMFS_BACKED_BY_MTD=y
65# CONFIG_NETWORK_FILESYSTEMS is not set 55# CONFIG_NETWORK_FILESYSTEMS is not set
66# CONFIG_RCU_CPU_STALL_DETECTOR is not set
67CONFIG_SYSCTL_SYSCALL_CHECK=y
68CONFIG_BOOTPARAM=y 56CONFIG_BOOTPARAM=y
69CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0" 57CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
70# CONFIG_CRC32 is not set
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index c5018a68819b..4f4ccd13c11b 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -1,11 +1,7 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_SWAP is not set 1# CONFIG_SWAP is not set
3CONFIG_LOG_BUF_SHIFT=14 2CONFIG_LOG_BUF_SHIFT=14
4CONFIG_SYSFS_DEPRECATED=y
5CONFIG_SYSFS_DEPRECATED_V2=y
6CONFIG_SYSCTL_SYSCALL=y 3CONFIG_SYSCTL_SYSCALL=y
7# CONFIG_KALLSYMS is not set 4# CONFIG_KALLSYMS is not set
8# CONFIG_HOTPLUG is not set
9# CONFIG_FUTEX is not set 5# CONFIG_FUTEX is not set
10# CONFIG_EPOLL is not set 6# CONFIG_EPOLL is not set
11# CONFIG_SIGNALFD is not set 7# CONFIG_SIGNALFD is not set
@@ -20,19 +16,16 @@ CONFIG_MODULES=y
20# CONFIG_IOSCHED_DEADLINE is not set 16# CONFIG_IOSCHED_DEADLINE is not set
21# CONFIG_IOSCHED_CFQ is not set 17# CONFIG_IOSCHED_CFQ is not set
22CONFIG_COLDFIRE=y 18CONFIG_COLDFIRE=y
23CONFIG_M547x=y
24CONFIG_CLOCK_SET=y
25CONFIG_CLOCK_FREQ=266000000
26# CONFIG_4KSTACKS is not set 19# CONFIG_4KSTACKS is not set
27CONFIG_RAMBASE=0x0 20CONFIG_RAMBASE=0x0
28CONFIG_RAMSIZE=0x2000000 21CONFIG_RAMSIZE=0x2000000
29CONFIG_VECTORBASE=0x0 22CONFIG_VECTORBASE=0x0
30CONFIG_MBAR=0xff000000 23CONFIG_MBAR=0xff000000
31CONFIG_KERNELBASE=0x20000 24CONFIG_KERNELBASE=0x20000
25CONFIG_PCI=y
32# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 26# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
33# CONFIG_FW_LOADER is not set 27# CONFIG_FW_LOADER is not set
34CONFIG_MTD=y 28CONFIG_MTD=y
35CONFIG_MTD_CHAR=y
36CONFIG_MTD_BLOCK=y 29CONFIG_MTD_BLOCK=y
37CONFIG_MTD_CFI=y 30CONFIG_MTD_CFI=y
38CONFIG_MTD_JEDECPROBE=y 31CONFIG_MTD_JEDECPROBE=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1555bc189c7d..eb85bd9c6180 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += kvm_para.h
18generic-y += local.h 18generic-y += local.h
19generic-y += local64.h 19generic-y += local64.h
20generic-y += mcs_spinlock.h 20generic-y += mcs_spinlock.h
21generic-y += mm-arch-hooks.h
21generic-y += mman.h 22generic-y += mman.h
22generic-y += mutex.h 23generic-y += mutex.h
23generic-y += percpu.h 24generic-y += percpu.h
diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
index c94557b91448..50aa4dac9ca2 100644
--- a/arch/m68k/include/asm/coldfire.h
+++ b/arch/m68k/include/asm/coldfire.h
@@ -19,7 +19,7 @@
19 * in any case new boards come along from time to time that have yet 19 * in any case new boards come along from time to time that have yet
20 * another different clocking frequency. 20 * another different clocking frequency.
21 */ 21 */
22#ifdef CONFIG_CLOCK_SET 22#ifdef CONFIG_CLOCK_FREQ
23#define MCF_CLK CONFIG_CLOCK_FREQ 23#define MCF_CLK CONFIG_CLOCK_FREQ
24#else 24#else
25#error "Don't know what your ColdFire CPU clock frequency is??" 25#error "Don't know what your ColdFire CPU clock frequency is??"
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 618c85d3c786..f55cad529400 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -413,7 +413,8 @@ static inline void isa_delay(void)
413#define writew(val, addr) out_le16((addr), (val)) 413#define writew(val, addr) out_le16((addr), (val))
414#endif /* CONFIG_ATARI_ROM_ISA */ 414#endif /* CONFIG_ATARI_ROM_ISA */
415 415
416#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) 416#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \
417 !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE))
417/* 418/*
418 * We need to define dummy functions for GENERIC_IOMAP support. 419 * We need to define dummy functions for GENERIC_IOMAP support.
419 */ 420 */
diff --git a/arch/m68k/include/asm/mm-arch-hooks.h b/arch/m68k/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 7e8709bc90ae..000000000000
--- a/arch/m68k/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_M68K_MM_ARCH_HOOKS_H
13#define _ASM_M68K_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_M68K_MM_ARCH_HOOKS_H */
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 199320f3c345..df31353fd200 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
25generic-y += local.h 25generic-y += local.h
26generic-y += local64.h 26generic-y += local64.h
27generic-y += mcs_spinlock.h 27generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h
28generic-y += msgbuf.h 29generic-y += msgbuf.h
29generic-y += mutex.h 30generic-y += mutex.h
30generic-y += param.h 31generic-y += param.h
diff --git a/arch/metag/include/asm/mm-arch-hooks.h b/arch/metag/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b0072b2eb0de..000000000000
--- a/arch/metag/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_METAG_MM_ARCH_HOOKS_H
13#define _ASM_METAG_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_METAG_MM_ARCH_HOOKS_H */
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 9989ddb169ca..2f222f355c4b 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += device.h
6generic-y += exec.h 6generic-y += exec.h
7generic-y += irq_work.h 7generic-y += irq_work.h
8generic-y += mcs_spinlock.h 8generic-y += mcs_spinlock.h
9generic-y += mm-arch-hooks.h
9generic-y += preempt.h 10generic-y += preempt.h
10generic-y += syscalls.h 11generic-y += syscalls.h
11generic-y += trace_clock.h 12generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/mm-arch-hooks.h b/arch/microblaze/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 5c4065911bda..000000000000
--- a/arch/microblaze/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
13#define _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_MICROBLAZE_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2a14585c90d2..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
151 select BCM7120_L2_IRQ 151 select BCM7120_L2_IRQ
152 select BRCMSTB_L2_IRQ 152 select BRCMSTB_L2_IRQ
153 select IRQ_MIPS_CPU 153 select IRQ_MIPS_CPU
154 select RAW_IRQ_ACCESSORS
155 select DMA_NONCOHERENT 154 select DMA_NONCOHERENT
156 select SYS_SUPPORTS_32BIT_KERNEL 155 select SYS_SUPPORTS_32BIT_KERNEL
157 select SYS_SUPPORTS_LITTLE_ENDIAN 156 select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -1427,6 +1426,7 @@ config CPU_MIPS64_R6
1427 select CPU_SUPPORTS_HIGHMEM 1426 select CPU_SUPPORTS_HIGHMEM
1428 select CPU_SUPPORTS_MSA 1427 select CPU_SUPPORTS_MSA
1429 select GENERIC_CSUM 1428 select GENERIC_CSUM
1429 select MIPS_O32_FP64_SUPPORT if MIPS32_O32
1430 help 1430 help
1431 Choose this option to build a kernel for release 6 or later of the 1431 Choose this option to build a kernel for release 6 or later of the
1432 MIPS64 architecture. New MIPS processors, starting with the Warrior 1432 MIPS64 architecture. New MIPS processors, starting with the Warrior
@@ -2231,7 +2231,7 @@ config MIPS_CMP
2231 2231
2232config MIPS_CPS 2232config MIPS_CPS
2233 bool "MIPS Coherent Processing System support" 2233 bool "MIPS Coherent Processing System support"
2234 depends on SYS_SUPPORTS_MIPS_CPS && !64BIT 2234 depends on SYS_SUPPORTS_MIPS_CPS
2235 select MIPS_CM 2235 select MIPS_CM
2236 select MIPS_CPC 2236 select MIPS_CPC
2237 select MIPS_CPS_PM if HOTPLUG_CPU 2237 select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2262,11 +2262,6 @@ config MIPS_CM
2262config MIPS_CPC 2262config MIPS_CPC
2263 bool 2263 bool
2264 2264
2265config SB1_PASS_1_WORKAROUNDS
2266 bool
2267 depends on CPU_SB1_PASS_1
2268 default y
2269
2270config SB1_PASS_2_WORKAROUNDS 2265config SB1_PASS_2_WORKAROUNDS
2271 bool 2266 bool
2272 depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2) 2267 depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index ae2dd59050f7..252e347958f3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -181,13 +181,6 @@ cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
181cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) 181cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
182cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,) 182cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,)
183 183
184ifdef CONFIG_CPU_SB1
185ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
186KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds
187KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
188endif
189endif
190
191# For smartmips configurations, there are hundreds of warnings due to ISA overrides 184# For smartmips configurations, there are hundreds of warnings due to ISA overrides
192# in assembly and header files. smartmips is only supported for MIPS32r1 onwards 185# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
193# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or 186# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
190{ 190{
191 return ATH79_MISC_IRQ(5); 191 return ATH79_MISC_IRQ(5);
192} 192}
193EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
193 194
194unsigned int get_c0_compare_int(void) 195unsigned int get_c0_compare_int(void)
195{ 196{
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); 42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
43 43
44 if (action & SMP_CALL_FUNCTION) 44 if (action & SMP_CALL_FUNCTION)
45 smp_call_function_interrupt(); 45 generic_smp_call_function_interrupt();
46 if (action & SMP_RESCHEDULE_YOURSELF) 46 if (action & SMP_RESCHEDULE_YOURSELF)
47 scheduler_ipi(); 47 scheduler_ipi();
48 48
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 7fe5c61a3cb8..1f8546081d20 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += emergency-restart.h
7generic-y += irq_work.h 7generic-y += irq_work.h
8generic-y += local64.h 8generic-y += local64.h
9generic-y += mcs_spinlock.h 9generic-y += mcs_spinlock.h
10generic-y += mm-arch-hooks.h
10generic-y += mutex.h 11generic-y += mutex.h
11generic-y += parport.h 12generic-y += parport.h
12generic-y += percpu.h 13generic-y += percpu.h
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 084780b355aa..1b0625189835 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
74 goto fr_common; 74 goto fr_common;
75 75
76 case FPU_64BIT: 76 case FPU_64BIT:
77#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ 77#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
78 || defined(CONFIG_64BIT)) 78 || defined(CONFIG_64BIT))
79 /* we only have a 32-bit FPU */ 79 /* we only have a 32-bit FPU */
80 return SIGFPE; 80 return SIGFPE;
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
2#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
3
4#include <asm/bmips.h>
5
6#define plat_post_dma_flush bmips_post_dma_flush
7
8#include <asm/mach-generic/dma-coherence.h>
9
10#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 37c08a27b4f0..c9f7e231e66b 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & 2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
3 * Insititute of Computing Technology 3 * Institute of Computing Technology
4 * Author: Xiang Gao, gaoxiang@ict.ac.cn 4 * Author: Xiang Gao, gaoxiang@ict.ac.cn
5 * Huacai Chen, chenhc@lemote.com 5 * Huacai Chen, chenhc@lemote.com
6 * Xiaofu Meng, Shuangshuang Zhang 6 * Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 0a227d426b9c..520f8fc2c806 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -13,8 +13,7 @@
13#define R4600_V2_HIT_CACHEOP_WAR 0 13#define R4600_V2_HIT_CACHEOP_WAR 0
14#define R5432_CP0_INTERRUPT_WAR 0 14#define R5432_CP0_INTERRUPT_WAR 0
15 15
16#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \ 16#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
17 defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
18 17
19#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
20extern int sb1250_m3_workaround_needed(void); 19extern int sb1250_m3_workaround_needed(void);
diff --git a/arch/mips/include/asm/mm-arch-hooks.h b/arch/mips/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b5609fe8e475..000000000000
--- a/arch/mips/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H
13#define _ASM_MIPS_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
182 * Make sure the buddy is global too (if it's !none, 182 * Make sure the buddy is global too (if it's !none,
183 * it better already be global) 183 * it better already be global)
184 */ 184 */
185#ifdef CONFIG_SMP
186 /*
187 * For SMP, multiple CPUs can race, so we need to do
188 * this atomically.
189 */
190#ifdef CONFIG_64BIT
191#define LL_INSN "lld"
192#define SC_INSN "scd"
193#else /* CONFIG_32BIT */
194#define LL_INSN "ll"
195#define SC_INSN "sc"
196#endif
197 unsigned long page_global = _PAGE_GLOBAL;
198 unsigned long tmp;
199
200 __asm__ __volatile__ (
201 " .set push\n"
202 " .set noreorder\n"
203 "1: " LL_INSN " %[tmp], %[buddy]\n"
204 " bnez %[tmp], 2f\n"
205 " or %[tmp], %[tmp], %[global]\n"
206 " " SC_INSN " %[tmp], %[buddy]\n"
207 " beqz %[tmp], 1b\n"
208 " nop\n"
209 "2:\n"
210 " .set pop"
211 : [buddy] "+m" (buddy->pte),
212 [tmp] "=&r" (tmp)
213 : [global] "r" (page_global));
214#else /* !CONFIG_SMP */
185 if (pte_none(*buddy)) 215 if (pte_none(*buddy))
186 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; 216 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
217#endif /* CONFIG_SMP */
187 } 218 }
188#endif 219#endif
189} 220}
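The pgtable.h hunk above replaces the plain read-modify-write of the buddy PTE with an ll/sc loop so that two CPUs racing through set_pte() cannot lose the _PAGE_GLOBAL bit. A minimal stand-alone C sketch of the same idea, using GCC's __atomic compare-and-swap in place of the MIPS ll/bnez/sc sequence (toy values, not kernel code):

#include <stdio.h>

#define PAGE_GLOBAL 0x1UL

/*
 * If the buddy PTE is still "none" (zero), atomically turn it into a
 * global entry; if another CPU already installed a real PTE there,
 * leave it alone (it must already carry the global bit).  A strong
 * compare-and-swap gives the same outcome as the ll/bnez/sc loop.
 */
static void make_buddy_global(unsigned long *buddy)
{
	unsigned long expected = 0;

	__atomic_compare_exchange_n(buddy, &expected, PAGE_GLOBAL,
				    0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

int main(void)
{
	unsigned long buddy = 0;

	make_buddy_global(&buddy);
	printf("buddy = %#lx\n", buddy);	/* prints 0x1 */
	return 0;
}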
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 2b25d1ba1ea0..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
23extern int smp_num_siblings; 23extern int smp_num_siblings;
24extern cpumask_t cpu_sibling_map[]; 24extern cpumask_t cpu_sibling_map[];
25extern cpumask_t cpu_core_map[]; 25extern cpumask_t cpu_core_map[];
26extern cpumask_t cpu_foreign_map;
26 27
27#define raw_smp_processor_id() (current_thread_info()->cpu) 28#define raw_smp_processor_id() (current_thread_info()->cpu)
28 29
@@ -82,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
82extern void play_dead(void); 83extern void play_dead(void);
83#endif 84#endif
84 85
85extern asmlinkage void smp_call_function_interrupt(void);
86
87static inline void arch_send_call_function_single_ipi(int cpu) 86static inline void arch_send_call_function_single_ipi(int cpu)
88{ 87{
89 extern struct plat_smp_ops *mp_ops; /* private */ 88 extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
152 .set noreorder 152 .set noreorder
153 bltz k0, 8f 153 bltz k0, 8f
154 move k1, sp 154 move k1, sp
155#ifdef CONFIG_EVA
156 /*
157 * Flush interAptiv's Return Prediction Stack (RPS) by writing
158 * EntryHi. Toggling Config7.RPS is slower and less portable.
159 *
160 * The RPS isn't automatically flushed when exceptions are
161 * taken, which can result in kernel mode speculative accesses
162 * to user addresses if the RPS mispredicts. That's harmless
163 * when user and kernel share the same address space, but with
164 * EVA the same user segments may be unmapped to kernel mode,
165 * even containing sensitive MMIO regions or invalid memory.
166 *
167 * This can happen when the kernel sets the return address to
168 * ret_from_* and jr's to the exception handler, which looks
169 * more like a tail call than a function call. If nested calls
170 * don't evict the last user address in the RPS, it will
171 * mispredict the return and fetch from a user controlled
172 * address into the icache.
173 *
174 * More recent EVA-capable cores with MAAR to restrict
175 * speculative accesses aren't affected.
176 */
177 MFC0 k0, CP0_ENTRYHI
178 MTC0 k0, CP0_ENTRYHI
179#endif
155 .set reorder 180 .set reorder
156 /* Called from user mode, new stack. */ 181 /* Called from user mode, new stack. */
157 get_saved_sp 182 get_saved_sp
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 6c9906f59c6e..9081d88ae44f 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -16,7 +16,7 @@
16 16
17/* 17/*
18 * Keep this struct definition in sync with the sigcontext fragment 18 * Keep this struct definition in sync with the sigcontext fragment
19 * in arch/mips/tools/offset.c 19 * in arch/mips/kernel/asm-offsets.c
20 */ 20 */
21struct sigcontext { 21struct sigcontext {
22 unsigned int sc_regmask; /* Unused */ 22 unsigned int sc_regmask; /* Unused */
@@ -46,7 +46,7 @@ struct sigcontext {
46#include <linux/posix_types.h> 46#include <linux/posix_types.h>
47/* 47/*
48 * Keep this struct definition in sync with the sigcontext fragment 48 * Keep this struct definition in sync with the sigcontext fragment
49 * in arch/mips/tools/offset.c 49 * in arch/mips/kernel/asm-offsets.c
50 * 50 *
51 * Warning: this structure illdefined with sc_badvaddr being just an unsigned 51 * Warning: this structure illdefined with sc_badvaddr being just an unsigned
52 * int so it was changed to unsigned long in 2.6.0-test1. This may break 52 * int so it was changed to unsigned long in 2.6.0-test1. This may break
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index beabe19ff8e5..072fab13645d 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * offset.c: Calculate pt_regs and task_struct offsets. 2 * asm-offsets.c: Calculate pt_regs and task_struct offsets.
3 * 3 *
4 * Copyright (C) 1996 David S. Miller 4 * Copyright (C) 1996 David S. Miller
5 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle 5 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c0c5e5972256..d8f9b357b222 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
600 break; 600 break;
601 601
602 case blezl_op: /* not really i_format */ 602 case blezl_op: /* not really i_format */
603 if (NO_R6EMU) 603 if (!insn.i_format.rt && NO_R6EMU)
604 goto sigill_r6; 604 goto sigill_r6;
605 case blez_op: 605 case blez_op:
606 /* 606 /*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
635 break; 635 break;
636 636
637 case bgtzl_op: 637 case bgtzl_op:
638 if (NO_R6EMU) 638 if (!insn.i_format.rt && NO_R6EMU)
639 goto sigill_r6; 639 goto sigill_r6;
640 case bgtz_op: 640 case bgtz_op:
641 /* 641 /*
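The branch.c change above stops treating every BLEZL/BGTZL word as a removed branch-likely instruction: on R6 the same opcode slot with a non-zero rt field encodes new compact branches, so only the rt == 0 form is rejected when the R2 emulator is disabled. A rough C sketch of that decode check (the field macros and encodings here are illustrative, not taken from the kernel headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OPCODE(x)	(((x) >> 26) & 0x3f)	/* major opcode field */
#define RT(x)		(((x) >> 16) & 0x1f)	/* rt register field  */
#define BLEZL_OP	0x16			/* pre-R6 BLEZL major opcode */

/*
 * Reject only the genuine branch-likely encoding (rt == 0) when running
 * on R6 without the R2 emulator; rt != 0 encodings in the same opcode
 * slot are valid R6 compact branches and must keep working.
 */
static bool reject_blezl(uint32_t insn, bool no_r6emu)
{
	return no_r6emu && OPCODE(insn) == BLEZL_OP && RT(insn) == 0;
}

int main(void)
{
	uint32_t branch_likely = BLEZL_OP << 26 | 4u << 21;		/* rt == 0 */
	uint32_t compact_form  = BLEZL_OP << 26 | 4u << 21 | 3u << 16;	/* rt != 0 */

	printf("%d %d\n", reject_blezl(branch_likely, true),
			  reject_blezl(compact_form, true));	/* prints: 1 0 */
	return 0;
}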
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 55b759a0019e..1b6ca634e646 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
60 nop 60 nop
61 61
62 /* This is an NMI */ 62 /* This is an NMI */
63 la k0, nmi_handler 63 PTR_LA k0, nmi_handler
64 jr k0 64 jr k0
65 nop 65 nop
66 66
@@ -107,10 +107,10 @@ not_nmi:
107 mul t1, t1, t0 107 mul t1, t1, t0
108 mul t1, t1, t2 108 mul t1, t1, t2
109 109
110 li a0, KSEG0 110 li a0, CKSEG0
111 add a1, a0, t1 111 PTR_ADD a1, a0, t1
1121: cache Index_Store_Tag_I, 0(a0) 1121: cache Index_Store_Tag_I, 0(a0)
113 add a0, a0, t0 113 PTR_ADD a0, a0, t0
114 bne a0, a1, 1b 114 bne a0, a1, 1b
115 nop 115 nop
116icache_done: 116icache_done:
@@ -134,12 +134,12 @@ icache_done:
134 mul t1, t1, t0 134 mul t1, t1, t0
135 mul t1, t1, t2 135 mul t1, t1, t2
136 136
137 li a0, KSEG0 137 li a0, CKSEG0
138 addu a1, a0, t1 138 PTR_ADDU a1, a0, t1
139 subu a1, a1, t0 139 PTR_SUBU a1, a1, t0
1401: cache Index_Store_Tag_D, 0(a0) 1401: cache Index_Store_Tag_D, 0(a0)
141 bne a0, a1, 1b 141 bne a0, a1, 1b
142 add a0, a0, t0 142 PTR_ADD a0, a0, t0
143dcache_done: 143dcache_done:
144 144
145 /* Set Kseg0 CCA to that in s0 */ 145 /* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@ dcache_done:
152 152
153 /* Enter the coherent domain */ 153 /* Enter the coherent domain */
154 li t0, 0xff 154 li t0, 0xff
155 sw t0, GCR_CL_COHERENCE_OFS(v1) 155 PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
156 ehb 156 ehb
157 157
158 /* Jump to kseg0 */ 158 /* Jump to kseg0 */
159 la t0, 1f 159 PTR_LA t0, 1f
160 jr t0 160 jr t0
161 nop 161 nop
162 162
@@ -178,9 +178,9 @@ dcache_done:
178 nop 178 nop
179 179
180 /* Off we go! */ 180 /* Off we go! */
181 lw t1, VPEBOOTCFG_PC(v0) 181 PTR_L t1, VPEBOOTCFG_PC(v0)
182 lw gp, VPEBOOTCFG_GP(v0) 182 PTR_L gp, VPEBOOTCFG_GP(v0)
183 lw sp, VPEBOOTCFG_SP(v0) 183 PTR_L sp, VPEBOOTCFG_SP(v0)
184 jr t1 184 jr t1
185 nop 185 nop
186 END(mips_cps_core_entry) 186 END(mips_cps_core_entry)
@@ -217,7 +217,7 @@ LEAF(excep_intex)
217 217
218.org 0x480 218.org 0x480
219LEAF(excep_ejtag) 219LEAF(excep_ejtag)
220 la k0, ejtag_debug_handler 220 PTR_LA k0, ejtag_debug_handler
221 jr k0 221 jr k0
222 nop 222 nop
223 END(excep_ejtag) 223 END(excep_ejtag)
@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
229 nop 229 nop
230 230
231 .set push 231 .set push
232 .set mips32r2 232 .set mips64r2
233 .set mt 233 .set mt
234 234
235 /* Only allow 1 TC per VPE to execute... */ 235 /* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
237 237
238 /* ...and for the moment only 1 VPE */ 238 /* ...and for the moment only 1 VPE */
239 dvpe 239 dvpe
240 la t1, 1f 240 PTR_LA t1, 1f
241 jr.hb t1 241 jr.hb t1
242 nop 242 nop
243 243
@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
250 mfc0 t0, CP0_MVPCONF0 250 mfc0 t0, CP0_MVPCONF0
251 srl t0, t0, MVPCONF0_PVPE_SHIFT 251 srl t0, t0, MVPCONF0_PVPE_SHIFT
252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
253 addiu t7, t0, 1 253 addiu ta3, t0, 1
254 254
255 /* If there's only 1, we're done */ 255 /* If there's only 1, we're done */
256 beqz t0, 2f 256 beqz t0, 2f
257 nop 257 nop
258 258
259 /* Loop through each VPE within this core */ 259 /* Loop through each VPE within this core */
260 li t5, 1 260 li ta1, 1
261 261
2621: /* Operate on the appropriate TC */ 2621: /* Operate on the appropriate TC */
263 mtc0 t5, CP0_VPECONTROL 263 mtc0 ta1, CP0_VPECONTROL
264 ehb 264 ehb
265 265
266 /* Bind TC to VPE (1:1 TC:VPE mapping) */ 266 /* Bind TC to VPE (1:1 TC:VPE mapping) */
267 mttc0 t5, CP0_TCBIND 267 mttc0 ta1, CP0_TCBIND
268 268
269 /* Set exclusive TC, non-active, master */ 269 /* Set exclusive TC, non-active, master */
270 li t0, VPECONF0_MVP 270 li t0, VPECONF0_MVP
271 sll t1, t5, VPECONF0_XTC_SHIFT 271 sll t1, ta1, VPECONF0_XTC_SHIFT
272 or t0, t0, t1 272 or t0, t0, t1
273 mttc0 t0, CP0_VPECONF0 273 mttc0 t0, CP0_VPECONF0
274 274
@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
280 mttc0 t0, CP0_TCHALT 280 mttc0 t0, CP0_TCHALT
281 281
282 /* Next VPE */ 282 /* Next VPE */
283 addiu t5, t5, 1 283 addiu ta1, ta1, 1
284 slt t0, t5, t7 284 slt t0, ta1, ta3
285 bnez t0, 1b 285 bnez t0, 1b
286 nop 286 nop
287 287
@@ -298,19 +298,19 @@ LEAF(mips_cps_core_init)
298 298
299LEAF(mips_cps_boot_vpes) 299LEAF(mips_cps_boot_vpes)
300 /* Retrieve CM base address */ 300 /* Retrieve CM base address */
301 la t0, mips_cm_base 301 PTR_LA t0, mips_cm_base
302 lw t0, 0(t0) 302 PTR_L t0, 0(t0)
303 303
304 /* Calculate a pointer to this cores struct core_boot_config */ 304 /* Calculate a pointer to this cores struct core_boot_config */
305 lw t0, GCR_CL_ID_OFS(t0) 305 PTR_L t0, GCR_CL_ID_OFS(t0)
306 li t1, COREBOOTCFG_SIZE 306 li t1, COREBOOTCFG_SIZE
307 mul t0, t0, t1 307 mul t0, t0, t1
308 la t1, mips_cps_core_bootcfg 308 PTR_LA t1, mips_cps_core_bootcfg
309 lw t1, 0(t1) 309 PTR_L t1, 0(t1)
310 addu t0, t0, t1 310 PTR_ADDU t0, t0, t1
311 311
312 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */ 312 /* Calculate this VPEs ID. If the core doesn't support MT use 0 */
313 has_mt t6, 1f 313 has_mt ta2, 1f
314 li t9, 0 314 li t9, 0
315 315
316 /* Find the number of VPEs present in the core */ 316 /* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes)
3341: /* Calculate a pointer to this VPEs struct vpe_boot_config */ 3341: /* Calculate a pointer to this VPEs struct vpe_boot_config */
335 li t1, VPEBOOTCFG_SIZE 335 li t1, VPEBOOTCFG_SIZE
336 mul v0, t9, t1 336 mul v0, t9, t1
337 lw t7, COREBOOTCFG_VPECONFIG(t0) 337 PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
338 addu v0, v0, t7 338 PTR_ADDU v0, v0, ta3
339 339
340#ifdef CONFIG_MIPS_MT 340#ifdef CONFIG_MIPS_MT
341 341
342 /* If the core doesn't support MT then return */ 342 /* If the core doesn't support MT then return */
343 bnez t6, 1f 343 bnez ta2, 1f
344 nop 344 nop
345 jr ra 345 jr ra
346 nop 346 nop
347 347
348 .set push 348 .set push
349 .set mips32r2 349 .set mips64r2
350 .set mt 350 .set mt
351 351
3521: /* Enter VPE configuration state */ 3521: /* Enter VPE configuration state */
353 dvpe 353 dvpe
354 la t1, 1f 354 PTR_LA t1, 1f
355 jr.hb t1 355 jr.hb t1
356 nop 356 nop
3571: mfc0 t1, CP0_MVPCONTROL 3571: mfc0 t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
360 ehb 360 ehb
361 361
362 /* Loop through each VPE */ 362 /* Loop through each VPE */
363 lw t6, COREBOOTCFG_VPEMASK(t0) 363 PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
364 move t8, t6 364 move t8, ta2
365 li t5, 0 365 li ta1, 0
366 366
367 /* Check whether the VPE should be running. If not, skip it */ 367 /* Check whether the VPE should be running. If not, skip it */
3681: andi t0, t6, 1 3681: andi t0, ta2, 1
369 beqz t0, 2f 369 beqz t0, 2f
370 nop 370 nop
371 371
@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
373 mfc0 t0, CP0_VPECONTROL 373 mfc0 t0, CP0_VPECONTROL
374 ori t0, t0, VPECONTROL_TARGTC 374 ori t0, t0, VPECONTROL_TARGTC
375 xori t0, t0, VPECONTROL_TARGTC 375 xori t0, t0, VPECONTROL_TARGTC
376 or t0, t0, t5 376 or t0, t0, ta1
377 mtc0 t0, CP0_VPECONTROL 377 mtc0 t0, CP0_VPECONTROL
378 ehb 378 ehb
379 379
@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
384 384
385 /* Calculate a pointer to the VPEs struct vpe_boot_config */ 385 /* Calculate a pointer to the VPEs struct vpe_boot_config */
386 li t0, VPEBOOTCFG_SIZE 386 li t0, VPEBOOTCFG_SIZE
387 mul t0, t0, t5 387 mul t0, t0, ta1
388 addu t0, t0, t7 388 addu t0, t0, ta3
389 389
390 /* Set the TC restart PC */ 390 /* Set the TC restart PC */
391 lw t1, VPEBOOTCFG_PC(t0) 391 lw t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
423 mttc0 t0, CP0_VPECONF0 423 mttc0 t0, CP0_VPECONF0
424 424
425 /* Next VPE */ 425 /* Next VPE */
4262: srl t6, t6, 1 4262: srl ta2, ta2, 1
427 addiu t5, t5, 1 427 addiu ta1, ta1, 1
428 bnez t6, 1b 428 bnez ta2, 1b
429 nop 429 nop
430 430
431 /* Leave VPE configuration state */ 431 /* Leave VPE configuration state */
@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
445 /* This VPE should be offline, halt the TC */ 445 /* This VPE should be offline, halt the TC */
446 li t0, TCHALT_H 446 li t0, TCHALT_H
447 mtc0 t0, CP0_TCHALT 447 mtc0 t0, CP0_TCHALT
448 la t0, 1f 448 PTR_LA t0, 1f
4491: jr.hb t0 4491: jr.hb t0
450 nop 450 nop
451 451
@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
466 .set noat 466 .set noat
467 lw $1, TI_CPU(gp) 467 lw $1, TI_CPU(gp)
468 sll $1, $1, LONGLOG 468 sll $1, $1, LONGLOG
469 la \dest, __per_cpu_offset 469 PTR_LA \dest, __per_cpu_offset
470 addu $1, $1, \dest 470 addu $1, $1, \dest
471 lw $1, 0($1) 471 lw $1, 0($1)
472 la \dest, cps_cpu_state 472 PTR_LA \dest, cps_cpu_state
473 addu \dest, \dest, $1 473 addu \dest, \dest, $1
474 .set pop 474 .set pop
475 .endm 475 .endm
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
407 .set noat 407 .set noat
408 SAVE_ALL 408 SAVE_ALL
409 FEXPORT(handle_\exception\ext) 409 FEXPORT(handle_\exception\ext)
410 __BUILD_clear_\clear 410 __build_clear_\clear
411 .set at 411 .set at
412 __BUILD_\verbose \exception 412 __BUILD_\verbose \exception
413 move a0, sp 413 move a0, sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
154 unsigned long __user *user_mask_ptr) 154 unsigned long __user *user_mask_ptr)
155{ 155{
156 unsigned int real_len; 156 unsigned int real_len;
157 cpumask_t mask; 157 cpumask_t allowed, mask;
158 int retval; 158 int retval;
159 struct task_struct *p; 159 struct task_struct *p;
160 160
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
173 if (retval) 173 if (retval)
174 goto out_unlock; 174 goto out_unlock;
175 175
176 cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); 176 cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
177 cpumask_and(&mask, &allowed, cpu_active_mask);
177 178
178out_unlock: 179out_unlock:
179 read_unlock(&tasklist_lock); 180 read_unlock(&tasklist_lock);
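The getaffinity fix above builds the reported mask from the union of the user-requested FPU affinity and the mask the scheduler is actually using, then clips the result to the active CPUs. The same composition on plain bitmasks (values invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long user_cpus_allowed = 0x3;	/* CPUs 0-1 requested        */
	unsigned long cpus_allowed      = 0xc;	/* scheduler placed on 2-3   */
	unsigned long cpu_active_mask   = 0x7;	/* CPUs 0-2 currently active */
	unsigned long allowed, mask;

	/* cpumask_or() then cpumask_and(), as in the hunk above */
	allowed = user_cpus_allowed | cpus_allowed;
	mask    = allowed & cpu_active_mask;

	printf("allowed=%#lx mask=%#lx\n", allowed, mask);	/* 0xf, 0x7 */
	return 0;
}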
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
38 return mips_machine_name; 38 return mips_machine_name;
39} 39}
40 40
41#ifdef CONFIG_OF 41#ifdef CONFIG_USE_OF
42void __init early_init_dt_add_memory_arch(u64 base, u64 size) 42void __init early_init_dt_add_memory_arch(u64 base, u64 size)
43{ 43{
44 return add_memory_region(base, size, BOOT_MEM_RAM); 44 return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
24 24
25process_entry: 25process_entry:
26 PTR_L s2, (s0) 26 PTR_L s2, (s0)
27 PTR_ADD s0, s0, SZREG 27 PTR_ADDIU s0, s0, SZREG
28 28
29 /* 29 /*
30 * In case of a kdump/crash kernel, the indirection page is not 30 * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
61 /* copy page word by word */ 61 /* copy page word by word */
62 REG_L s5, (s2) 62 REG_L s5, (s2)
63 REG_S s5, (s4) 63 REG_S s5, (s4)
64 PTR_ADD s4, s4, SZREG 64 PTR_ADDIU s4, s4, SZREG
65 PTR_ADD s2, s2, SZREG 65 PTR_ADDIU s2, s2, SZREG
66 LONG_SUB s6, s6, 1 66 LONG_ADDIU s6, s6, -1
67 beq s6, zero, process_entry 67 beq s6, zero, process_entry
68 b copy_word 68 b copy_word
69 b process_entry 69 b process_entry
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 6e8de80bb446..4cc13508d967 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp)
73 .set noreorder 73 .set noreorder
74 .set nomacro 74 .set nomacro
75 75
761: user_lw(t5, 16(t0)) # argument #5 from usp 76load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
774: user_lw(t6, 20(t0)) # argument #6 from usp 77load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
783: user_lw(t7, 24(t0)) # argument #7 from usp 78load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
792: user_lw(t8, 28(t0)) # argument #8 from usp 79load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
80loads_done:
80 81
81 sw t5, 16(sp) # argument #5 to ksp 82 sw t5, 16(sp) # argument #5 to ksp
82 sw t6, 20(sp) # argument #6 to ksp 83 sw t6, 20(sp) # argument #6 to ksp
@@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp)
85 .set pop 86 .set pop
86 87
87 .section __ex_table,"a" 88 .section __ex_table,"a"
88 PTR 1b,bad_stack 89 PTR load_a4, bad_stack_a4
89 PTR 2b,bad_stack 90 PTR load_a5, bad_stack_a5
90 PTR 3b,bad_stack 91 PTR load_a6, bad_stack_a6
91 PTR 4b,bad_stack 92 PTR load_a7, bad_stack_a7
92 .previous 93 .previous
93 94
94 lw t0, TI_FLAGS($28) # syscall tracing enabled? 95 lw t0, TI_FLAGS($28) # syscall tracing enabled?
@@ -153,8 +154,8 @@ syscall_trace_entry:
153/* ------------------------------------------------------------------------ */ 154/* ------------------------------------------------------------------------ */
154 155
155 /* 156 /*
156 * The stackpointer for a call with more than 4 arguments is bad. 157 * Our open-coded access area sanity test for the stack pointer
157 * We probably should handle this case a bit more drastic. 158 * failed. We probably should handle this case a bit more drastic.
158 */ 159 */
159bad_stack: 160bad_stack:
160 li v0, EFAULT 161 li v0, EFAULT
@@ -163,6 +164,22 @@ bad_stack:
163 sw t0, PT_R7(sp) 164 sw t0, PT_R7(sp)
164 j o32_syscall_exit 165 j o32_syscall_exit
165 166
167bad_stack_a4:
168 li t5, 0
169 b load_a5
170
171bad_stack_a5:
172 li t6, 0
173 b load_a6
174
175bad_stack_a6:
176 li t7, 0
177 b load_a7
178
179bad_stack_a7:
180 li t8, 0
181 b loads_done
182
166 /* 183 /*
167 * The system call does not exist in this kernel 184 * The system call does not exist in this kernel
168 */ 185 */
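The rewritten argument loads above give every user-stack fetch its own exception-table fixup: a fault while reading argument N now zeroes just that register and continues with the next load, instead of failing the whole syscall with EFAULT. A stand-alone C analogy of that per-argument fallback (the fault is modelled with a return code rather than a real exception table):

#include <stddef.h>
#include <stdio.h>

/* Stand-in for user_lw(): returns 0 on success, -1 if the address faults. */
static int user_read_word(const unsigned long *usp, size_t idx,
			  unsigned long *out)
{
	if (usp == NULL)	/* pretend this range faults */
		return -1;
	*out = usp[idx];
	return 0;
}

/*
 * Per-argument fixup: a fault while fetching argument N no longer aborts
 * the whole syscall; that argument is delivered as 0 and the remaining
 * loads still run, mirroring bad_stack_a4..bad_stack_a7 above.
 */
static void load_extra_args(const unsigned long *usp, unsigned long arg[4])
{
	for (size_t i = 0; i < 4; i++) {
		if (user_read_word(usp, 4 + i, &arg[i]))
			arg[i] = 0;		/* the fixup path */
	}
}

int main(void)
{
	unsigned long ustack[8] = { 0, 0, 0, 0, 55, 66, 77, 88 };
	unsigned long arg[4];

	load_extra_args(ustack, arg);		/* all reads succeed */
	printf("%lu %lu %lu %lu\n", arg[0], arg[1], arg[2], arg[3]);

	load_extra_args(NULL, arg);		/* every read "faults" */
	printf("%lu %lu %lu %lu\n", arg[0], arg[1], arg[2], arg[3]);
	return 0;
}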
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
80 SAVE_STATIC 80 SAVE_STATIC
81 move s0, t2 81 move s0, t2
82 move a0, sp 82 move a0, sp
83 daddiu a1, v0, __NR_64_Linux 83 move a1, v0
84 jal syscall_trace_enter 84 jal syscall_trace_enter
85 85
86 bltz v0, 2f # seccomp failed? Skip syscall 86 bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
72 SAVE_STATIC 72 SAVE_STATIC
73 move s0, t2 73 move s0, t2
74 move a0, sp 74 move a0, sp
75 daddiu a1, v0, __NR_N32_Linux 75 move a1, v0
76 jal syscall_trace_enter 76 jal syscall_trace_enter
77 77
78 bltz v0, 2f # seccomp failed? Skip syscall 78 bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d07b210fbeff..f543ff4feef9 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
69 daddu t1, t0, 32 69 daddu t1, t0, 32
70 bltz t1, bad_stack 70 bltz t1, bad_stack
71 71
721: lw a4, 16(t0) # argument #5 from usp 72load_a4: lw a4, 16(t0) # argument #5 from usp
732: lw a5, 20(t0) # argument #6 from usp 73load_a5: lw a5, 20(t0) # argument #6 from usp
743: lw a6, 24(t0) # argument #7 from usp 74load_a6: lw a6, 24(t0) # argument #7 from usp
754: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls) 75load_a7: lw a7, 28(t0) # argument #8 from usp
76loads_done:
76 77
77 .section __ex_table,"a" 78 .section __ex_table,"a"
78 PTR 1b, bad_stack 79 PTR load_a4, bad_stack_a4
79 PTR 2b, bad_stack 80 PTR load_a5, bad_stack_a5
80 PTR 3b, bad_stack 81 PTR load_a6, bad_stack_a6
81 PTR 4b, bad_stack 82 PTR load_a7, bad_stack_a7
82 .previous 83 .previous
83 84
84 li t1, _TIF_WORK_SYSCALL_ENTRY 85 li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@ bad_stack:
167 sd t0, PT_R7(sp) 168 sd t0, PT_R7(sp)
168 j o32_syscall_exit 169 j o32_syscall_exit
169 170
171bad_stack_a4:
172 li a4, 0
173 b load_a5
174
175bad_stack_a5:
176 li a5, 0
177 b load_a6
178
179bad_stack_a6:
180 li a6, 0
181 b load_a7
182
183bad_stack_a7:
184 li a7, 0
185 b loads_done
186
170not_o32_scall: 187not_o32_scall:
171 /* 188 /*
172 * This is not an o32 compatibility syscall, pass it on 189 * This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@ EXPORT(sys32_call_table)
383 PTR sys_connect /* 4170 */ 400 PTR sys_connect /* 4170 */
384 PTR sys_getpeername 401 PTR sys_getpeername
385 PTR sys_getsockname 402 PTR sys_getsockname
386 PTR sys_getsockopt 403 PTR compat_sys_getsockopt
387 PTR sys_listen 404 PTR sys_listen
388 PTR compat_sys_recv /* 4175 */ 405 PTR compat_sys_recv /* 4175 */
389 PTR compat_sys_recvfrom 406 PTR compat_sys_recvfrom
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index be73c491182b..008b3378653a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -337,6 +337,11 @@ static void __init bootmem_init(void)
337 min_low_pfn = start; 337 min_low_pfn = start;
338 if (end <= reserved_end) 338 if (end <= reserved_end)
339 continue; 339 continue;
340#ifdef CONFIG_BLK_DEV_INITRD
341 /* mapstart should be after initrd_end */
342 if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
343 continue;
344#endif
340 if (start >= mapstart) 345 if (start >= mapstart)
341 continue; 346 continue;
342 mapstart = max(reserved_end, start); 347 mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@ static void __init bootmem_init(void)
366 max_low_pfn = PFN_DOWN(HIGHMEM_START); 371 max_low_pfn = PFN_DOWN(HIGHMEM_START);
367 } 372 }
368 373
369#ifdef CONFIG_BLK_DEV_INITRD
370 /*
371 * mapstart should be after initrd_end
372 */
373 if (initrd_end)
374 mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
375#endif
376
377 /* 374 /*
378 * Initialize the boot-time allocator with low memory only. 375 * Initialize the boot-time allocator with low memory only.
379 */ 376 */
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
409 409
410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
411{ 411{
412 memset(to, 0, sizeof *to);
413
414 if (copy_from_user(to, from, 3*sizeof(int)) || 412 if (copy_from_user(to, from, 3*sizeof(int)) ||
415 copy_from_user(to->_sifields._pad, 413 copy_from_user(to->_sifields._pad,
416 from->_sifields._pad, SI_PAD_SIZE32)) 414 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
284 if (action == 0) 284 if (action == 0)
285 scheduler_ipi(); 285 scheduler_ipi();
286 else 286 else
287 smp_call_function_interrupt(); 287 generic_smp_call_function_interrupt();
288 288
289 return IRQ_HANDLED; 289 return IRQ_HANDLED;
290} 290}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
336 if (action & SMP_RESCHEDULE_YOURSELF) 336 if (action & SMP_RESCHEDULE_YOURSELF)
337 scheduler_ipi(); 337 scheduler_ipi();
338 if (action & SMP_CALL_FUNCTION) 338 if (action & SMP_CALL_FUNCTION)
339 smp_call_function_interrupt(); 339 generic_smp_call_function_interrupt();
340 340
341 return IRQ_HANDLED; 341 return IRQ_HANDLED;
342} 342}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4251d390b5b6..c88937745b4e 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
133 /* 133 /*
134 * Patch the start of mips_cps_core_entry to provide: 134 * Patch the start of mips_cps_core_entry to provide:
135 * 135 *
136 * v0 = CM base address 136 * v1 = CM base address
137 * s0 = kseg0 CCA 137 * s0 = kseg0 CCA
138 */ 138 */
139 entry_code = (u32 *)&mips_cps_core_entry; 139 entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@ void play_dead(void)
369 369
370static void wait_for_sibling_halt(void *ptr_cpu) 370static void wait_for_sibling_halt(void *ptr_cpu)
371{ 371{
372 unsigned cpu = (unsigned)ptr_cpu; 372 unsigned cpu = (unsigned long)ptr_cpu;
373 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); 373 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
374 unsigned halted; 374 unsigned halted;
375 unsigned long flags; 375 unsigned long flags;
@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu)
430 */ 430 */
431 err = smp_call_function_single(cpu_death_sibling, 431 err = smp_call_function_single(cpu_death_sibling,
432 wait_for_sibling_halt, 432 wait_for_sibling_halt,
433 (void *)cpu, 1); 433 (void *)(unsigned long)cpu, 1);
434 if (err) 434 if (err)
435 panic("Failed to call remote sibling CPU\n"); 435 panic("Failed to call remote sibling CPU\n");
436 } 436 }
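The smp-cps.c fix above is the usual way to smuggle a small integer through the void * argument of smp_call_function_single(): cast through unsigned long so the value is widened to pointer size on 64-bit kernels, and undo it the same way in the callback. A tiny C illustration of the pattern (names are made up):

#include <stdio.h>

/* Callback receiving a CPU number smuggled through a void * argument. */
static void sibling_halt(void *ptr_cpu)
{
	unsigned int cpu = (unsigned long)ptr_cpu;	/* narrow back via long */

	printf("waiting for cpu %u\n", cpu);
}

int main(void)
{
	unsigned int cpu = 3;

	/*
	 * Widen to unsigned long first; casting an int straight to void *
	 * draws -Wint-to-pointer-cast warnings on 64-bit builds.
	 */
	sibling_halt((void *)(unsigned long)cpu);
	return 0;
}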
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index faa46ebd9dda..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
63cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 63cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
64EXPORT_SYMBOL(cpu_core_map); 64EXPORT_SYMBOL(cpu_core_map);
65 65
66/*
67 * A logical cpu mask containing only one VPE per core to
68 * reduce the number of IPIs on large MT systems.
69 */
70cpumask_t cpu_foreign_map __read_mostly;
71EXPORT_SYMBOL(cpu_foreign_map);
72
66/* representing cpus for which sibling maps can be computed */ 73/* representing cpus for which sibling maps can be computed */
67static cpumask_t cpu_sibling_setup_map; 74static cpumask_t cpu_sibling_setup_map;
68 75
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
103 } 110 }
104} 111}
105 112
113/*
114 * Calculate a new cpu_foreign_map mask whenever a
115 * new cpu appears or disappears.
116 */
117static inline void calculate_cpu_foreign_map(void)
118{
119 int i, k, core_present;
120 cpumask_t temp_foreign_map;
121
122 /* Re-calculate the mask */
123 for_each_online_cpu(i) {
124 core_present = 0;
125 for_each_cpu(k, &temp_foreign_map)
126 if (cpu_data[i].package == cpu_data[k].package &&
127 cpu_data[i].core == cpu_data[k].core)
128 core_present = 1;
129 if (!core_present)
130 cpumask_set_cpu(i, &temp_foreign_map);
131 }
132
133 cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
134}
135
106struct plat_smp_ops *mp_ops; 136struct plat_smp_ops *mp_ops;
107EXPORT_SYMBOL(mp_ops); 137EXPORT_SYMBOL(mp_ops);
108 138
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
146 set_cpu_sibling_map(cpu); 176 set_cpu_sibling_map(cpu);
147 set_cpu_core_map(cpu); 177 set_cpu_core_map(cpu);
148 178
179 calculate_cpu_foreign_map();
180
149 cpumask_set_cpu(cpu, &cpu_callin_map); 181 cpumask_set_cpu(cpu, &cpu_callin_map);
150 182
151 synchronise_count_slave(cpu); 183 synchronise_count_slave(cpu);
@@ -160,22 +192,21 @@ asmlinkage void start_secondary(void)
160 cpu_startup_entry(CPUHP_ONLINE); 192 cpu_startup_entry(CPUHP_ONLINE);
161} 193}
162 194
163/*
164 * Call into both interrupt handlers, as we share the IPI for them
165 */
166void __irq_entry smp_call_function_interrupt(void)
167{
168 irq_enter();
169 generic_smp_call_function_interrupt();
170 irq_exit();
171}
172
173static void stop_this_cpu(void *dummy) 195static void stop_this_cpu(void *dummy)
174{ 196{
175 /* 197 /*
176 * Remove this CPU: 198 * Remove this CPU. Be a bit slow here and
199 * set the bits for every online CPU so we don't miss
200 * any IPI whilst taking this VPE down.
177 */ 201 */
202
203 cpumask_copy(&cpu_foreign_map, cpu_online_mask);
204
205 /* Make it visible to every other CPU */
206 smp_mb();
207
178 set_cpu_online(smp_processor_id(), false); 208 set_cpu_online(smp_processor_id(), false);
209 calculate_cpu_foreign_map();
179 local_irq_disable(); 210 local_irq_disable();
180 while (1); 211 while (1);
181} 212}
@@ -197,6 +228,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
197 mp_ops->prepare_cpus(max_cpus); 228 mp_ops->prepare_cpus(max_cpus);
198 set_cpu_sibling_map(0); 229 set_cpu_sibling_map(0);
199 set_cpu_core_map(0); 230 set_cpu_core_map(0);
231 calculate_cpu_foreign_map();
200#ifndef CONFIG_HOTPLUG_CPU 232#ifndef CONFIG_HOTPLUG_CPU
201 init_cpu_present(cpu_possible_mask); 233 init_cpu_present(cpu_possible_mask);
202#endif 234#endif
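calculate_cpu_foreign_map() above keeps one representative VPE per physical core, so cross-CPU work such as IPIs or cache maintenance is issued once per core instead of once per hardware thread. A stand-alone C sketch of the same selection on a fixed, invented topology:

#include <stdio.h>

struct cpuinfo { int package; int core; };

int main(void)
{
	struct cpuinfo cpu_data[] = {
		{ 0, 0 }, { 0, 0 },	/* core 0, two VPEs */
		{ 0, 1 }, { 0, 1 },	/* core 1, two VPEs */
	};
	int ncpus = 4;
	unsigned long foreign_map = 0;

	/* Set a CPU's bit only if its (package, core) is not yet represented. */
	for (int i = 0; i < ncpus; i++) {
		int core_present = 0;

		for (int k = 0; k < ncpus; k++)
			if ((foreign_map & (1UL << k)) &&
			    cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			foreign_map |= 1UL << i;
	}

	printf("cpu_foreign_map = %#lx\n", foreign_map);	/* 0x5: CPUs 0 and 2 */
	return 0;
}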
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2a7b38ed23f0..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
192void show_stack(struct task_struct *task, unsigned long *sp) 192void show_stack(struct task_struct *task, unsigned long *sp)
193{ 193{
194 struct pt_regs regs; 194 struct pt_regs regs;
195 mm_segment_t old_fs = get_fs();
195 if (sp) { 196 if (sp) {
196 regs.regs[29] = (unsigned long)sp; 197 regs.regs[29] = (unsigned long)sp;
197 regs.regs[31] = 0; 198 regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
210 prepare_frametrace(&regs); 211 prepare_frametrace(&regs);
211 } 212 }
212 } 213 }
214 /*
215 * show_stack() deals exclusively with kernel mode, so be sure to access
216 * the stack in the kernel (not user) address space.
217 */
218 set_fs(KERNEL_DS);
213 show_stacktrace(task, &regs); 219 show_stacktrace(task, &regs);
220 set_fs(old_fs);
214} 221}
215 222
216static void show_code(unsigned int __user *pc) 223static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1519 const int field = 2 * sizeof(unsigned long); 1526 const int field = 2 * sizeof(unsigned long);
1520 int multi_match = regs->cp0_status & ST0_TS; 1527 int multi_match = regs->cp0_status & ST0_TS;
1521 enum ctx_state prev_state; 1528 enum ctx_state prev_state;
1529 mm_segment_t old_fs = get_fs();
1522 1530
1523 prev_state = exception_enter(); 1531 prev_state = exception_enter();
1524 show_regs(regs); 1532 show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1540 dump_tlb_all(); 1548 dump_tlb_all();
1541 } 1549 }
1542 1550
1551 if (!user_mode(regs))
1552 set_fs(KERNEL_DS);
1553
1543 show_code((unsigned int __user *) regs->cp0_epc); 1554 show_code((unsigned int __user *) regs->cp0_epc);
1544 1555
1556 set_fs(old_fs);
1557
1545 /* 1558 /*
1546 * Some chips may have other causes of machine check (e.g. SB1 1559 * Some chips may have other causes of machine check (e.g. SB1
1547 * graduation timer) 1560 * graduation timer)
@@ -2130,10 +2143,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
2130 BUG_ON(current->mm); 2143 BUG_ON(current->mm);
2131 enter_lazy_tlb(&init_mm, current); 2144 enter_lazy_tlb(&init_mm, current);
2132 2145
2133 /* Boot CPU's cache setup in setup_arch(). */ 2146 /* Boot CPU's cache setup in setup_arch(). */
2134 if (!is_boot_cpu) 2147 if (!is_boot_cpu)
2135 cpu_cache_init(); 2148 cpu_cache_init();
2136 tlb_init(); 2149 tlb_init();
2137 TLBMISS_HANDLER_SETUP(); 2150 TLBMISS_HANDLER_SETUP();
2138} 2151}
2139 2152
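Both traps.c hunks above use the same save/override/restore pattern around get_fs()/set_fs(): widen the address limit to KERNEL_DS only while dumping kernel-mode state, then restore whatever limit was in force. A toy C model of that bookkeeping (the definitions here only mimic the shape of the API, not real access checking):

#include <stdio.h>

typedef int mm_segment_t;
#define USER_DS   0
#define KERNEL_DS 1

static mm_segment_t current_fs = USER_DS;

static mm_segment_t get_fs(void)         { return current_fs; }
static void set_fs(mm_segment_t fs)      { current_fs = fs; }

/* Save the old limit, widen it for the dump, and always restore it. */
static void dump_kernel_state(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	printf("dumping with fs=%d\n", get_fs());
	set_fs(old_fs);
}

int main(void)
{
	dump_kernel_state();
	printf("restored fs=%d\n", get_fs());	/* back to USER_DS */
	return 0;
}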
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
438 : "memory"); \ 438 : "memory"); \
439} while(0) 439} while(0)
440 440
441#define StoreDW(addr, value, res) \ 441#define _StoreDW(addr, value, res) \
442do { \ 442do { \
443 __asm__ __volatile__ ( \ 443 __asm__ __volatile__ ( \
444 ".set\tpush\n\t" \ 444 ".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
293 293
294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
295{ 295{
296 smp_call_function_interrupt(); 296 generic_smp_call_function_interrupt();
297 return IRQ_HANDLED; 297 return IRQ_HANDLED;
298} 298}
299 299
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
466{ 466{
467 return ltq_perfcount_irq; 467 return ltq_perfcount_irq;
468} 468}
469EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
469 470
470unsigned int get_c0_compare_int(void) 471unsigned int get_c0_compare_int(void)
471{ 472{
diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson64/common/bonito-irq.c
index cc0e4fd548e6..4e116d23bab3 100644
--- a/arch/mips/loongson64/common/bonito-irq.c
+++ b/arch/mips/loongson64/common/bonito-irq.c
@@ -3,7 +3,7 @@
3 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net 3 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
4 * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org) 4 * Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
5 * 5 *
6 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 6 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
7 * Author: Fuxin Zhang, zhangfx@lemote.com 7 * Author: Fuxin Zhang, zhangfx@lemote.com
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c
index 72fed003a536..01fbed137028 100644
--- a/arch/mips/loongson64/common/cmdline.c
+++ b/arch/mips/loongson64/common/cmdline.c
@@ -6,7 +6,7 @@
6 * Copyright 2003 ICT CAS 6 * Copyright 2003 ICT CAS
7 * Author: Michael Guo <guoyi@ict.ac.cn> 7 * Author: Michael Guo <guoyi@ict.ac.cn>
8 * 8 *
9 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 9 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
10 * Author: Fuxin Zhang, zhangfx@lemote.com 10 * Author: Fuxin Zhang, zhangfx@lemote.com
11 * 11 *
12 * Copyright (C) 2009 Lemote Inc. 12 * Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 12c75db23420..875037063a80 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * CS5536 General timer functions 2 * CS5536 General timer functions
3 * 3 *
4 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 4 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
5 * Author: Yanhua, yanh@lemote.com 5 * Author: Yanhua, yanh@lemote.com
6 * 6 *
7 * Copyright (C) 2009 Lemote Inc. 7 * Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 22f04ca2ff3e..f6c44dd332e2 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -6,7 +6,7 @@
6 * Copyright 2003 ICT CAS 6 * Copyright 2003 ICT CAS
7 * Author: Michael Guo <guoyi@ict.ac.cn> 7 * Author: Michael Guo <guoyi@ict.ac.cn>
8 * 8 *
9 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 9 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
10 * Author: Fuxin Zhang, zhangfx@lemote.com 10 * Author: Fuxin Zhang, zhangfx@lemote.com
11 * 11 *
12 * Copyright (C) 2009 Lemote Inc. 12 * Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson64/common/irq.c
index 687003b19b45..d36d969a4a87 100644
--- a/arch/mips/loongson64/common/irq.c
+++ b/arch/mips/loongson64/common/irq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
3 * Author: Fuxin Zhang, zhangfx@lemote.com 3 * Author: Fuxin Zhang, zhangfx@lemote.com
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson64/common/setup.c
index d477dd6bb326..2dc5122f0e09 100644
--- a/arch/mips/loongson64/common/setup.c
+++ b/arch/mips/loongson64/common/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
3 * Author: Fuxin Zhang, zhangfx@lemote.com 3 * Author: Fuxin Zhang, zhangfx@lemote.com
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson64/fuloong-2e/irq.c
index ef5ec8f3de5f..892963f860b7 100644
--- a/arch/mips/loongson64/fuloong-2e/irq.c
+++ b/arch/mips/loongson64/fuloong-2e/irq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
3 * Author: Fuxin Zhang, zhangfx@lemote.com 3 * Author: Fuxin Zhang, zhangfx@lemote.com
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c
index 462e34d46b4a..a78fb657068c 100644
--- a/arch/mips/loongson64/lemote-2f/clock.c
+++ b/arch/mips/loongson64/lemote-2f/clock.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology 2 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
3 * Author: Yanhua, yanh@lemote.com 3 * Author: Yanhua, yanh@lemote.com
4 * 4 *
5 * This file is subject to the terms and conditions of the GNU General Public 5 * This file is subject to the terms and conditions of the GNU General Public
@@ -15,7 +15,7 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16 16
17#include <asm/clock.h> 17#include <asm/clock.h>
18#include <asm/mach-loongson/loongson.h> 18#include <asm/mach-loongson64/loongson.h>
19 19
20static LIST_HEAD(clock_list); 20static LIST_HEAD(clock_list);
21static DEFINE_SPINLOCK(clock_lock); 21static DEFINE_SPINLOCK(clock_lock);
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 12d14ed48778..6f9e010cec4d 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. & 2 * Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
3 * Insititute of Computing Technology 3 * Institute of Computing Technology
4 * Author: Xiang Gao, gaoxiang@ict.ac.cn 4 * Author: Xiang Gao, gaoxiang@ict.ac.cn
5 * Huacai Chen, chenhc@lemote.com 5 * Huacai Chen, chenhc@lemote.com
6 * Xiaofu Meng, Shuangshuang Zhang 6 * Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
266 if (action & SMP_RESCHEDULE_YOURSELF) 266 if (action & SMP_RESCHEDULE_YOURSELF)
267 scheduler_ipi(); 267 scheduler_ipi();
268 268
269 if (action & SMP_CALL_FUNCTION) 269 if (action & SMP_CALL_FUNCTION) {
270 smp_call_function_interrupt(); 270 irq_enter();
271 generic_smp_call_function_interrupt();
272 irq_exit();
273 }
271 274
272 if (action & SMP_ASK_C0COUNT) { 275 if (action & SMP_ASK_C0COUNT) {
273 BUG_ON(cpu != 0); 276 BUG_ON(cpu != 0);
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 22b9b2cb9219..712f17a2ecf2 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
451 /* Fall through */ 451 /* Fall through */
452 case jr_op: 452 case jr_op:
453 /* For R6, JR already emulated in jalr_op */ 453 /* For R6, JR already emulated in jalr_op */
454 if (NO_R6EMU && insn.r_format.opcode == jr_op) 454 if (NO_R6EMU && insn.r_format.func == jr_op)
455 break; 455 break;
456 *contpc = regs->regs[insn.r_format.rs]; 456 *contpc = regs->regs[insn.r_format.rs];
457 return 1; 457 return 1;
@@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
551 dec_insn.next_pc_inc; 551 dec_insn.next_pc_inc;
552 return 1; 552 return 1;
553 case blezl_op: 553 case blezl_op:
554 if (NO_R6EMU) 554 if (!insn.i_format.rt && NO_R6EMU)
555 break; 555 break;
556 case blez_op: 556 case blez_op:
557 557
@@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
588 dec_insn.next_pc_inc; 588 dec_insn.next_pc_inc;
589 return 1; 589 return 1;
590 case bgtzl_op: 590 case bgtzl_op:
591 if (NO_R6EMU) 591 if (!insn.i_format.rt && NO_R6EMU)
592 break; 592 break;
593 case bgtz_op: 593 case bgtz_op:
594 /* 594 /*
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 7f660dc67596..fbea4432f3f2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -37,6 +37,7 @@
37#include <asm/cacheflush.h> /* for run_uncached() */ 37#include <asm/cacheflush.h> /* for run_uncached() */
38#include <asm/traps.h> 38#include <asm/traps.h>
39#include <asm/dma-coherence.h> 39#include <asm/dma-coherence.h>
40#include <asm/mips-cm.h>
40 41
41/* 42/*
42 * Special Variant of smp_call_function for use by cache functions: 43 * Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
51{ 52{
52 preempt_disable(); 53 preempt_disable();
53 54
54#ifndef CONFIG_MIPS_MT_SMP 55 /*
55 smp_call_function(func, info, 1); 56 * The Coherent Manager propagates address-based cache ops to other
56#endif 57 * cores but not index-based ops. However, r4k_on_each_cpu is used
58 * in both cases so there is no easy way to tell what kind of op is
59 * executed to the other cores. The best we can probably do is
60 * to restrict that call when a CM is not present because both
61 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
62 */
63 if (!mips_cm_present())
64 smp_call_function_many(&cpu_foreign_map, func, info, 1);
57 func(info); 65 func(info);
58 preempt_enable(); 66 preempt_enable();
59} 67}
@@ -937,7 +945,9 @@ static void b5k_instruction_hazard(void)
937} 945}
938 946
939static char *way_string[] = { NULL, "direct mapped", "2-way", 947static char *way_string[] = { NULL, "direct mapped", "2-way",
940 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" 948 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
949 "9-way", "10-way", "11-way", "12-way",
950 "13-way", "14-way", "15-way", "16-way",
941}; 951};
942 952
943static void probe_pcache(void) 953static void probe_pcache(void)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
167 167
168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); 170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); 171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); 174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); 175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
176 176
177 } else { 177 } else {
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
133#endif 133#endif
134 goto bad_area; 134 goto bad_area;
135 } 135 }
136 if (!(vma->vm_flags & VM_READ)) { 136 if (!(vma->vm_flags & VM_READ) &&
137 exception_epc(regs) != address) {
137#if 0 138#if 0
138 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", 139 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
139 raw_smp_processor_id(), 140 raw_smp_processor_id(),
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
222 222
223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
224{ 224{
225 smp_call_function_interrupt(); 225 generic_smp_call_function_interrupt();
226 226
227 return IRQ_HANDLED; 227 return IRQ_HANDLED;
228} 228}
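
Note: the arch-private smp_call_function_interrupt() wrapper is replaced here by direct calls to the generic SMP helper. Handlers that already run in hardirq context (registered via request_irq(), as in this file) can call it directly, while hand-rolled dispatch paths add irq_enter()/irq_exit() themselves (see the SGI IP27 and SiByte hunks below). A minimal sketch of the registered-handler case; the handler name is illustrative, not part of this patch:

	#include <linux/interrupt.h>
	#include <linux/smp.h>

	/* Runs in hardirq context because it is registered with
	 * request_irq(), so the generic helper can be called directly. */
	static irqreturn_t example_ipi_call_interrupt(int irq, void *dev_id)
	{
		generic_smp_call_function_interrupt();
		return IRQ_HANDLED;
	}
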
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 185e68261f45..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts)
119 119
120int get_c0_fdc_int(void) 120int get_c0_fdc_int(void)
121{ 121{
122 int mips_cpu_fdc_irq; 122 /*
123 * Some cores claim the FDC is routable through the GIC, but it doesn't
124 * actually seem to be connected for those Malta bitstreams.
125 */
126 switch (current_cpu_type()) {
127 case CPU_INTERAPTIV:
128 case CPU_PROAPTIV:
129 return -1;
130 };
123 131
124 if (cpu_has_veic) 132 if (cpu_has_veic)
125 mips_cpu_fdc_irq = -1; 133 return -1;
126 else if (gic_present) 134 else if (gic_present)
127 mips_cpu_fdc_irq = gic_get_c0_fdc_int(); 135 return gic_get_c0_fdc_int();
128 else if (cp0_fdc_irq >= 0) 136 else if (cp0_fdc_irq >= 0)
129 mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq; 137 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
130 else 138 else
131 mips_cpu_fdc_irq = -1; 139 return -1;
132
133 return mips_cpu_fdc_irq;
134} 140}
135 141
136int get_c0_perfcount_int(void) 142int get_c0_perfcount_int(void)
@@ -148,6 +154,7 @@ int get_c0_perfcount_int(void)
148 154
149 return mips_cpu_perf_irq; 155 return mips_cpu_perf_irq;
150} 156}
157EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
151 158
152unsigned int get_c0_compare_int(void) 159unsigned int get_c0_compare_int(void)
153{ 160{
@@ -165,14 +172,17 @@ unsigned int get_c0_compare_int(void)
165 172
166static void __init init_rtc(void) 173static void __init init_rtc(void)
167{ 174{
168 /* stop the clock whilst setting it up */ 175 unsigned char freq, ctrl;
169 CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
170 176
171 /* 32KHz time base */ 177 /* Set 32KHz time base if not already set */
172 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); 178 freq = CMOS_READ(RTC_FREQ_SELECT);
179 if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
180 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
173 181
174 /* start the clock */ 182 /* Ensure SET bit is clear so RTC can run */
175 CMOS_WRITE(RTC_24H, RTC_CONTROL); 183 ctrl = CMOS_READ(RTC_CONTROL);
184 if (ctrl & RTC_SET)
185 CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
176} 186}
177 187
178void __init plat_time_init(void) 188void __init plat_time_init(void)
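
Note: writing RTC_SET to RTC_CONTROL halts the clock, so the old init_rtc() unconditionally stopped and restarted an RTC that may already have been running correctly. The rewrite only writes when the current register contents are wrong. The same read-check-write pattern as a standalone sketch, assuming the usual <linux/mc146818rtc.h> accessors:

	#include <linux/mc146818rtc.h>

	/* Only touch the RTC when its configuration is wrong, so a clock
	 * that is already ticking with a 32 kHz time base is left alone. */
	static void example_init_rtc(void)
	{
		unsigned char freq, ctrl;

		freq = CMOS_READ(RTC_FREQ_SELECT);
		if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
			CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);

		ctrl = CMOS_READ(RTC_CONTROL);
		if (ctrl & RTC_SET)
			CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
	}
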
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
78 return -1; 78 return -1;
79} 79}
80EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
80 81
81unsigned int get_c0_compare_int(void) 82unsigned int get_c0_compare_int(void)
82{ 83{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
86{ 86{
87 clear_c0_eimr(irq); 87 clear_c0_eimr(irq);
88 ack_c0_eirr(irq); 88 ack_c0_eirr(irq);
89 smp_call_function_interrupt(); 89 generic_smp_call_function_interrupt();
90 set_c0_eimr(irq); 90 set_c0_eimr(irq);
91} 91}
92 92
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
114 114
115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) 115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
116{ 116{
117 smp_call_function_interrupt(); 117 generic_smp_call_function_interrupt();
118 return IRQ_HANDLED; 118 return IRQ_HANDLED;
119} 119}
120 120
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index d2dc836523a3..8bd8ebb20a72 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -63,13 +63,19 @@ void __init plat_mem_setup(void)
63 plat_setup_iocoherency(); 63 plat_setup_iocoherency();
64} 64}
65 65
66#define DEFAULT_CPC_BASE_ADDR 0x1bde0000 66#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
67#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000
67 68
68phys_addr_t mips_cpc_default_phys_base(void) 69phys_addr_t mips_cpc_default_phys_base(void)
69{ 70{
70 return DEFAULT_CPC_BASE_ADDR; 71 return DEFAULT_CPC_BASE_ADDR;
71} 72}
72 73
74phys_addr_t mips_cdmm_phys_base(void)
75{
76 return DEFAULT_CDMM_BASE_ADDR;
77}
78
73static void __init mips_nmi_setup(void) 79static void __init mips_nmi_setup(void)
74{ 80{
75 void *base; 81 void *base;
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 67889fcea8aa..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,12 @@ int get_c0_perfcount_int(void)
26{ 26{
27 return gic_get_c0_perfcount_int(); 27 return gic_get_c0_perfcount_int();
28} 28}
29EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
30
31int get_c0_fdc_int(void)
32{
33 return gic_get_c0_fdc_int();
34}
29 35
30void __init plat_time_init(void) 36void __init plat_time_init(void)
31{ 37{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
44 44
45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
46{ 46{
47 smp_call_function_interrupt(); 47 generic_smp_call_function_interrupt();
48 48
49 return IRQ_HANDLED; 49 return IRQ_HANDLED;
50} 50}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
89{ 89{
90 return rt_perfcount_irq; 90 return rt_perfcount_irq;
91} 91}
92EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
92 93
93unsigned int get_c0_compare_int(void) 94unsigned int get_c0_compare_int(void)
94{ 95{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
107 scheduler_ipi(); 107 scheduler_ipi();
108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { 108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); 109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
110 smp_call_function_interrupt(); 110 irq_enter();
111 generic_smp_call_function_interrupt();
112 irq_exit();
111 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { 113 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
112 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); 114 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
113 smp_call_function_interrupt(); 115 irq_enter();
116 generic_smp_call_function_interrupt();
117 irq_exit();
114 } else 118 } else
115#endif 119#endif
116 { 120 {
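
Note: unlike the Malta case above, these IPIs are demultiplexed by hand from a low-level dispatch routine rather than through a registered irqaction, so the callers must enter and leave hardirq context themselves around the generic cross-call processing. A sketch of that pattern; the function name is illustrative:

	#include <linux/hardirq.h>
	#include <linux/smp.h>

	/* Called from a raw interrupt dispatch path, not from a handler
	 * registered with request_irq(), so account for hardirq context
	 * explicitly before running the queued cross-CPU functions. */
	static void example_handle_call_ipi(void)
	{
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}
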
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index a8bb972fd9fd..cb9a095f5c5e 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -81,11 +81,6 @@ choice
81 prompt "SiByte SOC Stepping" 81 prompt "SiByte SOC Stepping"
82 depends on SIBYTE_SB1xxx_SOC 82 depends on SIBYTE_SB1xxx_SOC
83 83
84config CPU_SB1_PASS_1
85 bool "1250 Pass1"
86 depends on SIBYTE_SB1250
87 select CPU_HAS_PREFETCH
88
89config CPU_SB1_PASS_2_1250 84config CPU_SB1_PASS_2_1250
90 bool "1250 An" 85 bool "1250 An"
91 depends on SIBYTE_SB1250 86 depends on SIBYTE_SB1250
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
29#include <asm/sibyte/bcm1480_regs.h> 29#include <asm/sibyte/bcm1480_regs.h>
30#include <asm/sibyte/bcm1480_int.h> 30#include <asm/sibyte/bcm1480_int.h>
31 31
32extern void smp_call_function_interrupt(void);
33
34/* 32/*
35 * These are routines for dealing with the bcm1480 smp capabilities 33 * These are routines for dealing with the bcm1480 smp capabilities
36 * independent of board/firmware 34 * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
184 if (action & SMP_RESCHEDULE_YOURSELF) 182 if (action & SMP_RESCHEDULE_YOURSELF)
185 scheduler_ipi(); 183 scheduler_ipi();
186 184
187 if (action & SMP_CALL_FUNCTION) 185 if (action & SMP_CALL_FUNCTION) {
188 smp_call_function_interrupt(); 186 irq_enter();
187 generic_smp_call_function_interrupt();
188 irq_exit();
189 }
189} 190}
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
index 5581844c9194..41a1d2242211 100644
--- a/arch/mips/sibyte/common/bus_watcher.c
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -81,10 +81,7 @@ void check_bus_watcher(void)
81{ 81{
82 u32 status, l2_err, memio_err; 82 u32 status, l2_err, memio_err;
83 83
84#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS 84#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
85 /* Destructive read, clears register and interrupt */
86 status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
87#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
88 /* Use non-destructive register */ 85 /* Use non-destructive register */
89 status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG)); 86 status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG));
90#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) 87#elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 3c02b2a77ae9..9d3c24efdf4a 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -202,12 +202,10 @@ void __init sb1250_setup(void)
202 202
203 switch (war_pass) { 203 switch (war_pass) {
204 case K_SYS_REVISION_BCM1250_PASS1: 204 case K_SYS_REVISION_BCM1250_PASS1:
205#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
206 printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, " 205 printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
207 "and the kernel doesn't have the proper " 206 "and the kernel doesn't have the proper "
208 "workarounds compiled in. @@@@\n"); 207 "workarounds compiled in. @@@@\n");
209 bad_config = 1; 208 bad_config = 1;
210#endif
211 break; 209 break;
212 case K_SYS_REVISION_BCM1250_PASS2: 210 case K_SYS_REVISION_BCM1250_PASS2:
213 /* Pass 2 - easiest as default for now - so many numbers */ 211 /* Pass 2 - easiest as default for now - so many numbers */
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
172 if (action & SMP_RESCHEDULE_YOURSELF) 172 if (action & SMP_RESCHEDULE_YOURSELF)
173 scheduler_ipi(); 173 scheduler_ipi();
174 174
175 if (action & SMP_CALL_FUNCTION) 175 if (action & SMP_CALL_FUNCTION) {
176 smp_call_function_interrupt(); 176 irq_enter();
177 generic_smp_call_function_interrupt();
178 irq_exit();
179 }
177} 180}
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index de30b0c88796..6edb9ee6128e 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cputime.h
5generic-y += exec.h 5generic-y += exec.h
6generic-y += irq_work.h 6generic-y += irq_work.h
7generic-y += mcs_spinlock.h 7generic-y += mcs_spinlock.h
8generic-y += mm-arch-hooks.h
8generic-y += preempt.h 9generic-y += preempt.h
9generic-y += sections.h 10generic-y += sections.h
10generic-y += trace_clock.h 11generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/mm-arch-hooks.h b/arch/mn10300/include/asm/mm-arch-hooks.h
deleted file mode 100644
index e2029a652f4c..000000000000
--- a/arch/mn10300/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_MN10300_MM_ARCH_HOOKS_H
13#define _ASM_MN10300_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_MN10300_MM_ARCH_HOOKS_H */
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 434639d510b3..914864eb5a25 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -30,6 +30,7 @@ generic-y += kmap_types.h
30generic-y += kvm_para.h 30generic-y += kvm_para.h
31generic-y += local.h 31generic-y += local.h
32generic-y += mcs_spinlock.h 32generic-y += mcs_spinlock.h
33generic-y += mm-arch-hooks.h
33generic-y += mman.h 34generic-y += mman.h
34generic-y += module.h 35generic-y += module.h
35generic-y += msgbuf.h 36generic-y += msgbuf.h
diff --git a/arch/nios2/include/asm/mm-arch-hooks.h b/arch/nios2/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d7290dc68558..000000000000
--- a/arch/nios2/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_NIOS2_MM_ARCH_HOOKS_H
13#define _ASM_NIOS2_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_NIOS2_MM_ARCH_HOOKS_H */
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index e5a693b16da2..443f44de1020 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -17,6 +17,7 @@ config OPENRISC
17 select GENERIC_IRQ_SHOW 17 select GENERIC_IRQ_SHOW
18 select GENERIC_IOMAP 18 select GENERIC_IOMAP
19 select GENERIC_CPU_DEVICES 19 select GENERIC_CPU_DEVICES
20 select HAVE_UID16
20 select GENERIC_ATOMIC64 21 select GENERIC_ATOMIC64
21 select GENERIC_CLOCKEVENTS 22 select GENERIC_CLOCKEVENTS
22 select GENERIC_STRNCPY_FROM_USER 23 select GENERIC_STRNCPY_FROM_USER
@@ -31,9 +32,6 @@ config MMU
31config HAVE_DMA_ATTRS 32config HAVE_DMA_ATTRS
32 def_bool y 33 def_bool y
33 34
34config UID16
35 def_bool y
36
37config RWSEM_GENERIC_SPINLOCK 35config RWSEM_GENERIC_SPINLOCK
38 def_bool y 36 def_bool y
39 37
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 2a2e39b8109a..2832f031fb11 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -36,6 +36,7 @@ generic-y += kmap_types.h
36generic-y += kvm_para.h 36generic-y += kvm_para.h
37generic-y += local.h 37generic-y += local.h
38generic-y += mcs_spinlock.h 38generic-y += mcs_spinlock.h
39generic-y += mm-arch-hooks.h
39generic-y += mman.h 40generic-y += mman.h
40generic-y += module.h 41generic-y += module.h
41generic-y += msgbuf.h 42generic-y += msgbuf.h
diff --git a/arch/openrisc/include/asm/mm-arch-hooks.h b/arch/openrisc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 6d33cb555fe1..000000000000
--- a/arch/openrisc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_OPENRISC_MM_ARCH_HOOKS_H
13#define _ASM_OPENRISC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_OPENRISC_MM_ARCH_HOOKS_H */
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 12b341d04f88..f9b3a81aefcd 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += kvm_para.h
15generic-y += local.h 15generic-y += local.h
16generic-y += local64.h 16generic-y += local64.h
17generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
18generic-y += mm-arch-hooks.h
18generic-y += mutex.h 19generic-y += mutex.h
19generic-y += param.h 20generic-y += param.h
20generic-y += percpu.h 21generic-y += percpu.h
diff --git a/arch/parisc/include/asm/mm-arch-hooks.h b/arch/parisc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 654ec63b0ee9..000000000000
--- a/arch/parisc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_PARISC_MM_ARCH_HOOKS_H
13#define _ASM_PARISC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_PARISC_MM_ARCH_HOOKS_H */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3a08eae3318f..3edbb9fc91b4 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
72 72
73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) 73static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
74{ 74{
75 if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 75 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
76 /* 76 /*
77 * This is the permanent pmd attached to the pgd; 77 * This is the permanent pmd attached to the pgd;
78 * cannot free it. 78 * cannot free it.
@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
81 */ 81 */
82 mm_inc_nr_pmds(mm); 82 mm_inc_nr_pmds(mm);
83 return; 83 return;
84 }
84 free_pages((unsigned long)pmd, PMD_ORDER); 85 free_pages((unsigned long)pmd, PMD_ORDER);
85} 86}
86 87
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 0a183756d6ec..f93c4a4e6580 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,7 +16,7 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/cache.h> 17#include <asm/cache.h>
18 18
19extern spinlock_t pa_dbit_lock; 19extern spinlock_t pa_tlb_lock;
20 20
21/* 21/*
22 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel 22 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
33 */ 33 */
34#define kern_addr_valid(addr) (1) 34#define kern_addr_valid(addr) (1)
35 35
36/* Purge data and instruction TLB entries. Must be called holding
37 * the pa_tlb_lock. The TLB purge instructions are slow on SMP
38 * machines since the purge must be broadcast to all CPUs.
39 */
40
41static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
42{
43 mtsp(mm->context, 1);
44 pdtlb(addr);
45 if (unlikely(split_tlb))
46 pitlb(addr);
47}
48
36/* Certain architectures need to do special things when PTEs 49/* Certain architectures need to do special things when PTEs
37 * within a page table are directly modified. Thus, the following 50 * within a page table are directly modified. Thus, the following
38 * hook is made available. 51 * hook is made available.
@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
42 *(pteptr) = (pteval); \ 55 *(pteptr) = (pteval); \
43 } while(0) 56 } while(0)
44 57
45extern void purge_tlb_entries(struct mm_struct *, unsigned long); 58#define pte_inserted(x) \
59 ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
60 == (_PAGE_PRESENT|_PAGE_ACCESSED))
46 61
47#define set_pte_at(mm, addr, ptep, pteval) \ 62#define set_pte_at(mm, addr, ptep, pteval) \
48 do { \ 63 do { \
64 pte_t old_pte; \
49 unsigned long flags; \ 65 unsigned long flags; \
50 spin_lock_irqsave(&pa_dbit_lock, flags); \ 66 spin_lock_irqsave(&pa_tlb_lock, flags); \
51 set_pte(ptep, pteval); \ 67 old_pte = *ptep; \
52 purge_tlb_entries(mm, addr); \ 68 set_pte(ptep, pteval); \
53 spin_unlock_irqrestore(&pa_dbit_lock, flags); \ 69 if (pte_inserted(old_pte)) \
70 purge_tlb_entries(mm, addr); \
71 spin_unlock_irqrestore(&pa_tlb_lock, flags); \
54 } while (0) 72 } while (0)
55 73
56#endif /* !__ASSEMBLY__ */ 74#endif /* !__ASSEMBLY__ */
@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
268 286
269#define pte_none(x) (pte_val(x) == 0) 287#define pte_none(x) (pte_val(x) == 0)
270#define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 288#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
271#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0) 289#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
272 290
273#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) 291#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
274#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 292#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
435 if (!pte_young(*ptep)) 453 if (!pte_young(*ptep))
436 return 0; 454 return 0;
437 455
438 spin_lock_irqsave(&pa_dbit_lock, flags); 456 spin_lock_irqsave(&pa_tlb_lock, flags);
439 pte = *ptep; 457 pte = *ptep;
440 if (!pte_young(pte)) { 458 if (!pte_young(pte)) {
441 spin_unlock_irqrestore(&pa_dbit_lock, flags); 459 spin_unlock_irqrestore(&pa_tlb_lock, flags);
442 return 0; 460 return 0;
443 } 461 }
444 set_pte(ptep, pte_mkold(pte)); 462 set_pte(ptep, pte_mkold(pte));
445 purge_tlb_entries(vma->vm_mm, addr); 463 purge_tlb_entries(vma->vm_mm, addr);
446 spin_unlock_irqrestore(&pa_dbit_lock, flags); 464 spin_unlock_irqrestore(&pa_tlb_lock, flags);
447 return 1; 465 return 1;
448} 466}
449 467
@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
453 pte_t old_pte; 471 pte_t old_pte;
454 unsigned long flags; 472 unsigned long flags;
455 473
456 spin_lock_irqsave(&pa_dbit_lock, flags); 474 spin_lock_irqsave(&pa_tlb_lock, flags);
457 old_pte = *ptep; 475 old_pte = *ptep;
458 pte_clear(mm,addr,ptep); 476 set_pte(ptep, __pte(0));
459 purge_tlb_entries(mm, addr); 477 if (pte_inserted(old_pte))
460 spin_unlock_irqrestore(&pa_dbit_lock, flags); 478 purge_tlb_entries(mm, addr);
479 spin_unlock_irqrestore(&pa_tlb_lock, flags);
461 480
462 return old_pte; 481 return old_pte;
463} 482}
@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
465static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 484static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
466{ 485{
467 unsigned long flags; 486 unsigned long flags;
468 spin_lock_irqsave(&pa_dbit_lock, flags); 487 spin_lock_irqsave(&pa_tlb_lock, flags);
469 set_pte(ptep, pte_wrprotect(*ptep)); 488 set_pte(ptep, pte_wrprotect(*ptep));
470 purge_tlb_entries(mm, addr); 489 purge_tlb_entries(mm, addr);
471 spin_unlock_irqrestore(&pa_dbit_lock, flags); 490 spin_unlock_irqrestore(&pa_tlb_lock, flags);
472} 491}
473 492
474#define pte_same(A,B) (pte_val(A) == pte_val(B)) 493#define pte_same(A,B) (pte_val(A) == pte_val(B))
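
Note: pa_dbit_lock is folded into pa_tlb_lock, and PTE updates now purge the TLB under that lock only when the old entry could actually have been loaded into a TLB (present and accessed), avoiding needless broadcast pdtlb/pitlb purges. A C sketch of the locked update using the names from this diff; it restates the set_pte_at() macro as a function for readability only:

	#include <asm/pgtable.h>

	/* Skip the slow, broadcast TLB purge when the previous PTE can
	 * never have been inserted into the TLB: it was not both present
	 * and accessed. */
	static inline void example_set_pte_at(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, pte_t pteval)
	{
		pte_t old_pte;
		unsigned long flags;

		spin_lock_irqsave(&pa_tlb_lock, flags);
		old_pte = *ptep;
		set_pte(ptep, pteval);
		if (pte_inserted(old_pte))
			purge_tlb_entries(mm, addr);
		spin_unlock_irqrestore(&pa_tlb_lock, flags);
	}
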
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 9d086a599fa0..e84b96478193 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -13,6 +13,9 @@
13 * active at any one time on the Merced bus. This tlb purge 13 * active at any one time on the Merced bus. This tlb purge
14 * synchronisation is fairly lightweight and harmless so we activate 14 * synchronisation is fairly lightweight and harmless so we activate
15 * it on all systems not just the N class. 15 * it on all systems not just the N class.
16
17 * It is also used to ensure PTE updates are atomic and consistent
18 * with the TLB.
16 */ 19 */
17extern spinlock_t pa_tlb_lock; 20extern spinlock_t pa_tlb_lock;
18 21
@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
24 27
25#define smp_flush_tlb_all() flush_tlb_all() 28#define smp_flush_tlb_all() flush_tlb_all()
26 29
30int __flush_tlb_range(unsigned long sid,
31 unsigned long start, unsigned long end);
32
33#define flush_tlb_range(vma, start, end) \
34 __flush_tlb_range((vma)->vm_mm->context, start, end)
35
36#define flush_tlb_kernel_range(start, end) \
37 __flush_tlb_range(0, start, end)
38
27/* 39/*
28 * flush_tlb_mm() 40 * flush_tlb_mm()
29 * 41 *
30 * XXX This code is NOT valid for HP-UX compatibility processes, 42 * The code to switch to a new context is NOT valid for processes
31 * (although it will probably work 99% of the time). HP-UX 43 * which play with the space id's. Thus, we have to preserve the
32 * processes are free to play with the space id's and save them 44 * space and just flush the entire tlb. However, the compilers,
33 * over long periods of time, etc. so we have to preserve the 45 * dynamic linker, etc, do not manipulate space id's, so there
34 * space and just flush the entire tlb. We need to check the 46 * could be a significant performance benefit in switching contexts
35 * personality in order to do that, but the personality is not 47 * and not flushing the whole tlb.
36 * currently being set correctly.
37 *
38 * Of course, Linux processes could do the same thing, but
39 * we don't support that (and the compilers, dynamic linker,
40 * etc. do not do that).
41 */ 48 */
42 49
43static inline void flush_tlb_mm(struct mm_struct *mm) 50static inline void flush_tlb_mm(struct mm_struct *mm)
@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
45 BUG_ON(mm == &init_mm); /* Should never happen */ 52 BUG_ON(mm == &init_mm); /* Should never happen */
46 53
47#if 1 || defined(CONFIG_SMP) 54#if 1 || defined(CONFIG_SMP)
55 /* Except for very small threads, flushing the whole TLB is
56 * faster than using __flush_tlb_range. The pdtlb and pitlb
57 * instructions are very slow because of the TLB broadcast.
58 * It might be faster to do local range flushes on all CPUs
59 * on PA 2.0 systems.
60 */
48 flush_tlb_all(); 61 flush_tlb_all();
49#else 62#else
50 /* FIXME: currently broken, causing space id and protection ids 63 /* FIXME: currently broken, causing space id and protection ids
51 * to go out of sync, resulting in faults on userspace accesses. 64 * to go out of sync, resulting in faults on userspace accesses.
65 * This approach needs further investigation since running many
66 * small applications (e.g., GCC testsuite) is faster on HP-UX.
52 */ 67 */
53 if (mm) { 68 if (mm) {
54 if (mm->context != 0) 69 if (mm->context != 0)
@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
65{ 80{
66 unsigned long flags, sid; 81 unsigned long flags, sid;
67 82
68 /* For one page, it's not worth testing the split_tlb variable */
69
70 mb();
71 sid = vma->vm_mm->context; 83 sid = vma->vm_mm->context;
72 purge_tlb_start(flags); 84 purge_tlb_start(flags);
73 mtsp(sid, 1); 85 mtsp(sid, 1);
74 pdtlb(addr); 86 pdtlb(addr);
75 pitlb(addr); 87 if (unlikely(split_tlb))
88 pitlb(addr);
76 purge_tlb_end(flags); 89 purge_tlb_end(flags);
77} 90}
78
79void __flush_tlb_range(unsigned long sid,
80 unsigned long start, unsigned long end);
81
82#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
83
84#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
85
86#endif 91#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index f6448c7c62b5..cda6dbbe9842 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
342EXPORT_SYMBOL(flush_kernel_icache_range_asm); 342EXPORT_SYMBOL(flush_kernel_icache_range_asm);
343 343
344#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 344#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
345int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD; 345static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
346
347#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
348static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
346 349
347void __init parisc_setup_cache_timing(void) 350void __init parisc_setup_cache_timing(void)
348{ 351{
349 unsigned long rangetime, alltime; 352 unsigned long rangetime, alltime;
350 unsigned long size; 353 unsigned long size, start;
351 354
352 alltime = mfctl(16); 355 alltime = mfctl(16);
353 flush_data_cache(); 356 flush_data_cache();
@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
364 /* Racy, but if we see an intermediate value, it's ok too... */ 367 /* Racy, but if we see an intermediate value, it's ok too... */
365 parisc_cache_flush_threshold = size * alltime / rangetime; 368 parisc_cache_flush_threshold = size * alltime / rangetime;
366 369
367 parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 370 parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
368 if (!parisc_cache_flush_threshold) 371 if (!parisc_cache_flush_threshold)
369 parisc_cache_flush_threshold = FLUSH_THRESHOLD; 372 parisc_cache_flush_threshold = FLUSH_THRESHOLD;
370 373
371 if (parisc_cache_flush_threshold > cache_info.dc_size) 374 if (parisc_cache_flush_threshold > cache_info.dc_size)
372 parisc_cache_flush_threshold = cache_info.dc_size; 375 parisc_cache_flush_threshold = cache_info.dc_size;
373 376
374 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus()); 377 printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
378 parisc_cache_flush_threshold/1024);
379
380 /* calculate TLB flush threshold */
381
382 alltime = mfctl(16);
383 flush_tlb_all();
384 alltime = mfctl(16) - alltime;
385
386 size = PAGE_SIZE;
387 start = (unsigned long) _text;
388 rangetime = mfctl(16);
389 while (start < (unsigned long) _end) {
390 flush_tlb_kernel_range(start, start + PAGE_SIZE);
391 start += PAGE_SIZE;
392 size += PAGE_SIZE;
393 }
394 rangetime = mfctl(16) - rangetime;
395
396 printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
397 alltime, size, rangetime);
398
399 parisc_tlb_flush_threshold = size * alltime / rangetime;
400 parisc_tlb_flush_threshold *= num_online_cpus();
401 parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
402 if (!parisc_tlb_flush_threshold)
403 parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
404
405 printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
406 parisc_tlb_flush_threshold/1024);
375} 407}
376 408
377extern void purge_kernel_dcache_page_asm(unsigned long); 409extern void purge_kernel_dcache_page_asm(unsigned long);
@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
403} 435}
404EXPORT_SYMBOL(copy_user_page); 436EXPORT_SYMBOL(copy_user_page);
405 437
406void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) 438/* __flush_tlb_range()
407{ 439 *
408 unsigned long flags; 440 * returns 1 if all TLBs were flushed.
409 441 */
410 /* Note: purge_tlb_entries can be called at startup with 442int __flush_tlb_range(unsigned long sid, unsigned long start,
411 no context. */ 443 unsigned long end)
412
413 purge_tlb_start(flags);
414 mtsp(mm->context, 1);
415 pdtlb(addr);
416 pitlb(addr);
417 purge_tlb_end(flags);
418}
419EXPORT_SYMBOL(purge_tlb_entries);
420
421void __flush_tlb_range(unsigned long sid, unsigned long start,
422 unsigned long end)
423{ 444{
424 unsigned long npages; 445 unsigned long flags, size;
425 446
426 npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 447 size = (end - start);
427 if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */ 448 if (size >= parisc_tlb_flush_threshold) {
428 flush_tlb_all(); 449 flush_tlb_all();
429 else { 450 return 1;
430 unsigned long flags; 451 }
431 452
453 /* Purge TLB entries for small ranges using the pdtlb and
454 pitlb instructions. These instructions execute locally
455 but cause a purge request to be broadcast to other TLBs. */
456 if (likely(!split_tlb)) {
457 while (start < end) {
458 purge_tlb_start(flags);
459 mtsp(sid, 1);
460 pdtlb(start);
461 purge_tlb_end(flags);
462 start += PAGE_SIZE;
463 }
464 return 0;
465 }
466
467 /* split TLB case */
468 while (start < end) {
432 purge_tlb_start(flags); 469 purge_tlb_start(flags);
433 mtsp(sid, 1); 470 mtsp(sid, 1);
434 if (split_tlb) { 471 pdtlb(start);
435 while (npages--) { 472 pitlb(start);
436 pdtlb(start);
437 pitlb(start);
438 start += PAGE_SIZE;
439 }
440 } else {
441 while (npages--) {
442 pdtlb(start);
443 start += PAGE_SIZE;
444 }
445 }
446 purge_tlb_end(flags); 473 purge_tlb_end(flags);
474 start += PAGE_SIZE;
447 } 475 }
476 return 0;
448} 477}
449 478
450static void cacheflush_h_tmp_function(void *dummy) 479static void cacheflush_h_tmp_function(void *dummy)
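
Note: parisc_setup_cache_timing() now also calibrates a TLB flush threshold at boot by timing one full flush against per-page flushes over the kernel image, scaled by the number of online CPUs. __flush_tlb_range() then uses that threshold to pick between purging page by page and flushing everything. A sketch of just that decision, assuming the parisc_tlb_flush_threshold variable introduced in this diff:

	/* Ranges at or above the calibrated threshold are cheaper to
	 * handle with one global flush than with many broadcast pdtlb
	 * (and, on split-TLB machines, pitlb) purges. */
	int example_flush_tlb_range(unsigned long sid, unsigned long start,
				    unsigned long end)
	{
		if (end - start >= parisc_tlb_flush_threshold) {
			flush_tlb_all();
			return 1;	/* everything flushed */
		}
		/* ...otherwise purge [start, end) one page at a time... */
		return 0;
	}
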
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 75819617f93b..c5ef4081b01d 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -45,7 +45,7 @@
45 .level 2.0 45 .level 2.0
46#endif 46#endif
47 47
48 .import pa_dbit_lock,data 48 .import pa_tlb_lock,data
49 49
50 /* space_to_prot macro creates a prot id from a space id */ 50 /* space_to_prot macro creates a prot id from a space id */
51 51
@@ -420,8 +420,8 @@
420 SHLREG %r9,PxD_VALUE_SHIFT,\pmd 420 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
421 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 421 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
422 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 422 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
423 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd 423 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
424 LDREG %r0(\pmd),\pte /* pmd is now pte */ 424 LDREG %r0(\pmd),\pte
425 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 425 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
426 .endm 426 .endm
427 427
@@ -453,57 +453,53 @@
453 L2_ptep \pgd,\pte,\index,\va,\fault 453 L2_ptep \pgd,\pte,\index,\va,\fault
454 .endm 454 .endm
455 455
456 /* Acquire pa_dbit_lock lock. */ 456 /* Acquire pa_tlb_lock lock and recheck page is still present. */
457 .macro dbit_lock spc,tmp,tmp1 457 .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
458#ifdef CONFIG_SMP 458#ifdef CONFIG_SMP
459 cmpib,COND(=),n 0,\spc,2f 459 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_dbit_lock),\tmp 460 load32 PA(pa_tlb_lock),\tmp
4611: LDCW 0(\tmp),\tmp1 4611: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b 462 cmpib,COND(=) 0,\tmp1,1b
463 nop 463 nop
464 LDREG 0(\ptp),\pte
465 bb,<,n \pte,_PAGE_PRESENT_BIT,2f
466 b \fault
467 stw \spc,0(\tmp)
4642: 4682:
465#endif 469#endif
466 .endm 470 .endm
467 471
468 /* Release pa_dbit_lock lock without reloading lock address. */ 472 /* Release pa_tlb_lock lock without reloading lock address. */
469 .macro dbit_unlock0 spc,tmp 473 .macro tlb_unlock0 spc,tmp
470#ifdef CONFIG_SMP 474#ifdef CONFIG_SMP
471 or,COND(=) %r0,\spc,%r0 475 or,COND(=) %r0,\spc,%r0
472 stw \spc,0(\tmp) 476 stw \spc,0(\tmp)
473#endif 477#endif
474 .endm 478 .endm
475 479
476 /* Release pa_dbit_lock lock. */ 480 /* Release pa_tlb_lock lock. */
477 .macro dbit_unlock1 spc,tmp 481 .macro tlb_unlock1 spc,tmp
478#ifdef CONFIG_SMP 482#ifdef CONFIG_SMP
479 load32 PA(pa_dbit_lock),\tmp 483 load32 PA(pa_tlb_lock),\tmp
480 dbit_unlock0 \spc,\tmp 484 tlb_unlock0 \spc,\tmp
481#endif 485#endif
482 .endm 486 .endm
483 487
484 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and 488 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
485 * don't needlessly dirty the cache line if it was already set */ 489 * don't needlessly dirty the cache line if it was already set */
486 .macro update_ptep spc,ptep,pte,tmp,tmp1 490 .macro update_accessed ptp,pte,tmp,tmp1
487#ifdef CONFIG_SMP
488 or,COND(=) %r0,\spc,%r0
489 LDREG 0(\ptep),\pte
490#endif
491 ldi _PAGE_ACCESSED,\tmp1 491 ldi _PAGE_ACCESSED,\tmp1
492 or \tmp1,\pte,\tmp 492 or \tmp1,\pte,\tmp
493 and,COND(<>) \tmp1,\pte,%r0 493 and,COND(<>) \tmp1,\pte,%r0
494 STREG \tmp,0(\ptep) 494 STREG \tmp,0(\ptp)
495 .endm 495 .endm
496 496
497 /* Set the dirty bit (and accessed bit). No need to be 497 /* Set the dirty bit (and accessed bit). No need to be
498 * clever, this is only used from the dirty fault */ 498 * clever, this is only used from the dirty fault */
499 .macro update_dirty spc,ptep,pte,tmp 499 .macro update_dirty ptp,pte,tmp
500#ifdef CONFIG_SMP
501 or,COND(=) %r0,\spc,%r0
502 LDREG 0(\ptep),\pte
503#endif
504 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp 500 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
505 or \tmp,\pte,\pte 501 or \tmp,\pte,\pte
506 STREG \pte,0(\ptep) 502 STREG \pte,0(\ptp)
507 .endm 503 .endm
508 504
509 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) 505 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
1148 1144
1149 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w 1145 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1150 1146
1151 dbit_lock spc,t0,t1 1147 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1152 update_ptep spc,ptp,pte,t0,t1 1148 update_accessed ptp,pte,t0,t1
1153 1149
1154 make_insert_tlb spc,pte,prot 1150 make_insert_tlb spc,pte,prot
1155 1151
1156 idtlbt pte,prot 1152 idtlbt pte,prot
1157 dbit_unlock1 spc,t0
1158 1153
1154 tlb_unlock1 spc,t0
1159 rfir 1155 rfir
1160 nop 1156 nop
1161 1157
@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
1174 1170
1175 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w 1171 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1176 1172
1177 dbit_lock spc,t0,t1 1173 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1178 update_ptep spc,ptp,pte,t0,t1 1174 update_accessed ptp,pte,t0,t1
1179 1175
1180 make_insert_tlb spc,pte,prot 1176 make_insert_tlb spc,pte,prot
1181 1177
1182 idtlbt pte,prot 1178 idtlbt pte,prot
1183 dbit_unlock1 spc,t0
1184 1179
1180 tlb_unlock1 spc,t0
1185 rfir 1181 rfir
1186 nop 1182 nop
1187 1183
@@ -1202,20 +1198,20 @@ dtlb_miss_11:
1202 1198
1203 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 1199 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1204 1200
1205 dbit_lock spc,t0,t1 1201 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1206 update_ptep spc,ptp,pte,t0,t1 1202 update_accessed ptp,pte,t0,t1
1207 1203
1208 make_insert_tlb_11 spc,pte,prot 1204 make_insert_tlb_11 spc,pte,prot
1209 1205
1210 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1206 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1211 mtsp spc,%sr1 1207 mtsp spc,%sr1
1212 1208
1213 idtlba pte,(%sr1,va) 1209 idtlba pte,(%sr1,va)
1214 idtlbp prot,(%sr1,va) 1210 idtlbp prot,(%sr1,va)
1215 1211
1216 mtsp t0, %sr1 /* Restore sr1 */ 1212 mtsp t1, %sr1 /* Restore sr1 */
1217 dbit_unlock1 spc,t0
1218 1213
1214 tlb_unlock1 spc,t0
1219 rfir 1215 rfir
1220 nop 1216 nop
1221 1217
@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
1235 1231
1236 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 1232 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1237 1233
1238 dbit_lock spc,t0,t1 1234 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1239 update_ptep spc,ptp,pte,t0,t1 1235 update_accessed ptp,pte,t0,t1
1240 1236
1241 make_insert_tlb_11 spc,pte,prot 1237 make_insert_tlb_11 spc,pte,prot
1242 1238
1243 1239 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1244 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1245 mtsp spc,%sr1 1240 mtsp spc,%sr1
1246 1241
1247 idtlba pte,(%sr1,va) 1242 idtlba pte,(%sr1,va)
1248 idtlbp prot,(%sr1,va) 1243 idtlbp prot,(%sr1,va)
1249 1244
1250 mtsp t0, %sr1 /* Restore sr1 */ 1245 mtsp t1, %sr1 /* Restore sr1 */
1251 dbit_unlock1 spc,t0
1252 1246
1247 tlb_unlock1 spc,t0
1253 rfir 1248 rfir
1254 nop 1249 nop
1255 1250
@@ -1269,16 +1264,16 @@ dtlb_miss_20:
1269 1264
1270 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 1265 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1271 1266
1272 dbit_lock spc,t0,t1 1267 tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1273 update_ptep spc,ptp,pte,t0,t1 1268 update_accessed ptp,pte,t0,t1
1274 1269
1275 make_insert_tlb spc,pte,prot 1270 make_insert_tlb spc,pte,prot
1276 1271
1277 f_extend pte,t0 1272 f_extend pte,t1
1278 1273
1279 idtlbt pte,prot 1274 idtlbt pte,prot
1280 dbit_unlock1 spc,t0
1281 1275
1276 tlb_unlock1 spc,t0
1282 rfir 1277 rfir
1283 nop 1278 nop
1284 1279
@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
1297 1292
1298 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 1293 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1299 1294
1300 dbit_lock spc,t0,t1 1295 tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1301 update_ptep spc,ptp,pte,t0,t1 1296 update_accessed ptp,pte,t0,t1
1302 1297
1303 make_insert_tlb spc,pte,prot 1298 make_insert_tlb spc,pte,prot
1304 1299
1305 f_extend pte,t0 1300 f_extend pte,t1
1306 1301
1307 idtlbt pte,prot 1302 idtlbt pte,prot
1308 dbit_unlock1 spc,t0
1309 1303
1304 tlb_unlock1 spc,t0
1310 rfir 1305 rfir
1311 nop 1306 nop
1312 1307
@@ -1406,14 +1401,14 @@ itlb_miss_20w:
1406 1401
1407 L3_ptep ptp,pte,t0,va,itlb_fault 1402 L3_ptep ptp,pte,t0,va,itlb_fault
1408 1403
1409 dbit_lock spc,t0,t1 1404 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1410 update_ptep spc,ptp,pte,t0,t1 1405 update_accessed ptp,pte,t0,t1
1411 1406
1412 make_insert_tlb spc,pte,prot 1407 make_insert_tlb spc,pte,prot
1413 1408
1414 iitlbt pte,prot 1409 iitlbt pte,prot
1415 dbit_unlock1 spc,t0
1416 1410
1411 tlb_unlock1 spc,t0
1417 rfir 1412 rfir
1418 nop 1413 nop
1419 1414
@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
1430 1425
1431 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w 1426 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1432 1427
1433 dbit_lock spc,t0,t1 1428 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1434 update_ptep spc,ptp,pte,t0,t1 1429 update_accessed ptp,pte,t0,t1
1435 1430
1436 make_insert_tlb spc,pte,prot 1431 make_insert_tlb spc,pte,prot
1437 1432
1438 iitlbt pte,prot 1433 iitlbt pte,prot
1439 dbit_unlock1 spc,t0
1440 1434
1435 tlb_unlock1 spc,t0
1441 rfir 1436 rfir
1442 nop 1437 nop
1443 1438
@@ -1458,20 +1453,20 @@ itlb_miss_11:
1458 1453
1459 L2_ptep ptp,pte,t0,va,itlb_fault 1454 L2_ptep ptp,pte,t0,va,itlb_fault
1460 1455
1461 dbit_lock spc,t0,t1 1456 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1462 update_ptep spc,ptp,pte,t0,t1 1457 update_accessed ptp,pte,t0,t1
1463 1458
1464 make_insert_tlb_11 spc,pte,prot 1459 make_insert_tlb_11 spc,pte,prot
1465 1460
1466 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1461 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1467 mtsp spc,%sr1 1462 mtsp spc,%sr1
1468 1463
1469 iitlba pte,(%sr1,va) 1464 iitlba pte,(%sr1,va)
1470 iitlbp prot,(%sr1,va) 1465 iitlbp prot,(%sr1,va)
1471 1466
1472 mtsp t0, %sr1 /* Restore sr1 */ 1467 mtsp t1, %sr1 /* Restore sr1 */
1473 dbit_unlock1 spc,t0
1474 1468
1469 tlb_unlock1 spc,t0
1475 rfir 1470 rfir
1476 nop 1471 nop
1477 1472
@@ -1482,20 +1477,20 @@ naitlb_miss_11:
1482 1477
1483 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 1478 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1484 1479
1485 dbit_lock spc,t0,t1 1480 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1486 update_ptep spc,ptp,pte,t0,t1 1481 update_accessed ptp,pte,t0,t1
1487 1482
1488 make_insert_tlb_11 spc,pte,prot 1483 make_insert_tlb_11 spc,pte,prot
1489 1484
1490 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1485 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1491 mtsp spc,%sr1 1486 mtsp spc,%sr1
1492 1487
1493 iitlba pte,(%sr1,va) 1488 iitlba pte,(%sr1,va)
1494 iitlbp prot,(%sr1,va) 1489 iitlbp prot,(%sr1,va)
1495 1490
1496 mtsp t0, %sr1 /* Restore sr1 */ 1491 mtsp t1, %sr1 /* Restore sr1 */
1497 dbit_unlock1 spc,t0
1498 1492
1493 tlb_unlock1 spc,t0
1499 rfir 1494 rfir
1500 nop 1495 nop
1501 1496
@@ -1516,16 +1511,16 @@ itlb_miss_20:
1516 1511
1517 L2_ptep ptp,pte,t0,va,itlb_fault 1512 L2_ptep ptp,pte,t0,va,itlb_fault
1518 1513
1519 dbit_lock spc,t0,t1 1514 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1520 update_ptep spc,ptp,pte,t0,t1 1515 update_accessed ptp,pte,t0,t1
1521 1516
1522 make_insert_tlb spc,pte,prot 1517 make_insert_tlb spc,pte,prot
1523 1518
1524 f_extend pte,t0 1519 f_extend pte,t1
1525 1520
1526 iitlbt pte,prot 1521 iitlbt pte,prot
1527 dbit_unlock1 spc,t0
1528 1522
1523 tlb_unlock1 spc,t0
1529 rfir 1524 rfir
1530 nop 1525 nop
1531 1526
@@ -1536,16 +1531,16 @@ naitlb_miss_20:
1536 1531
1537 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 1532 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1538 1533
1539 dbit_lock spc,t0,t1 1534 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1540 update_ptep spc,ptp,pte,t0,t1 1535 update_accessed ptp,pte,t0,t1
1541 1536
1542 make_insert_tlb spc,pte,prot 1537 make_insert_tlb spc,pte,prot
1543 1538
1544 f_extend pte,t0 1539 f_extend pte,t1
1545 1540
1546 iitlbt pte,prot 1541 iitlbt pte,prot
1547 dbit_unlock1 spc,t0
1548 1542
1543 tlb_unlock1 spc,t0
1549 rfir 1544 rfir
1550 nop 1545 nop
1551 1546
@@ -1568,14 +1563,14 @@ dbit_trap_20w:
1568 1563
1569 L3_ptep ptp,pte,t0,va,dbit_fault 1564 L3_ptep ptp,pte,t0,va,dbit_fault
1570 1565
1571 dbit_lock spc,t0,t1 1566 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1572 update_dirty spc,ptp,pte,t1 1567 update_dirty ptp,pte,t1
1573 1568
1574 make_insert_tlb spc,pte,prot 1569 make_insert_tlb spc,pte,prot
1575 1570
1576 idtlbt pte,prot 1571 idtlbt pte,prot
1577 dbit_unlock0 spc,t0
1578 1572
1573 tlb_unlock0 spc,t0
1579 rfir 1574 rfir
1580 nop 1575 nop
1581#else 1576#else
@@ -1588,8 +1583,8 @@ dbit_trap_11:
1588 1583
1589 L2_ptep ptp,pte,t0,va,dbit_fault 1584 L2_ptep ptp,pte,t0,va,dbit_fault
1590 1585
1591 dbit_lock spc,t0,t1 1586 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1592 update_dirty spc,ptp,pte,t1 1587 update_dirty ptp,pte,t1
1593 1588
1594 make_insert_tlb_11 spc,pte,prot 1589 make_insert_tlb_11 spc,pte,prot
1595 1590
@@ -1600,8 +1595,8 @@ dbit_trap_11:
1600 idtlbp prot,(%sr1,va) 1595 idtlbp prot,(%sr1,va)
1601 1596
1602 mtsp t1, %sr1 /* Restore sr1 */ 1597 mtsp t1, %sr1 /* Restore sr1 */
1603 dbit_unlock0 spc,t0
1604 1598
1599 tlb_unlock0 spc,t0
1605 rfir 1600 rfir
1606 nop 1601 nop
1607 1602
@@ -1612,16 +1607,16 @@ dbit_trap_20:
1612 1607
1613 L2_ptep ptp,pte,t0,va,dbit_fault 1608 L2_ptep ptp,pte,t0,va,dbit_fault
1614 1609
1615 dbit_lock spc,t0,t1 1610 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1616 update_dirty spc,ptp,pte,t1 1611 update_dirty ptp,pte,t1
1617 1612
1618 make_insert_tlb spc,pte,prot 1613 make_insert_tlb spc,pte,prot
1619 1614
1620 f_extend pte,t1 1615 f_extend pte,t1
1621 1616
1622 idtlbt pte,prot 1617 idtlbt pte,prot
1623 dbit_unlock0 spc,t0
1624 1618
1619 tlb_unlock0 spc,t0
1625 rfir 1620 rfir
1626 nop 1621 nop
1627#endif 1622#endif
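
Note: the renamed tlb_lock macro does more than take the lock: on SMP it re-reads the PTE after acquiring pa_tlb_lock and bails out to the fault path if the entry is no longer present, so a translation that another CPU has just torn down is never inserted. The equivalent check expressed in C; the error handling is illustrative only:

	#include <linux/errno.h>
	#include <asm/pgtable.h>

	/* Recheck under the lock: between the lockless page-table walk
	 * and taking pa_tlb_lock, another CPU may have cleared the PTE. */
	static int example_tlb_refill(pte_t *ptep)
	{
		pte_t pte;

		spin_lock(&pa_tlb_lock);
		pte = *ptep;
		if (!pte_present(pte)) {
			spin_unlock(&pa_tlb_lock);
			return -EFAULT;		/* take the slow path */
		}
		/* set _PAGE_ACCESSED / _PAGE_DIRTY and insert the
		 * translation here, then drop the lock */
		spin_unlock(&pa_tlb_lock);
		return 0;
	}
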
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 6548fd1d2e62..b99b39f1da02 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -43,10 +43,6 @@
43 43
44#include "../math-emu/math-emu.h" /* for handle_fpe() */ 44#include "../math-emu/math-emu.h" /* for handle_fpe() */
45 45
46#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
47DEFINE_SPINLOCK(pa_dbit_lock);
48#endif
49
50static void parisc_show_stack(struct task_struct *task, unsigned long *sp, 46static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
51 struct pt_regs *regs); 47 struct pt_regs *regs);
52 48
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index ccde8f084ce4..112ccf497562 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -52,6 +52,22 @@
52 .text 52 .text
53 53
54/* 54/*
55 * Used by threads when the lock bit of core_idle_state is set.
56 * Threads will spin in HMT_LOW until the lock bit is cleared.
57 * r14 - pointer to core_idle_state
58 * r15 - used to load contents of core_idle_state
59 */
60
61core_idle_lock_held:
62 HMT_LOW
633: lwz r15,0(r14)
64 andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
65 bne 3b
66 HMT_MEDIUM
67 lwarx r15,0,r14
68 blr
69
70/*
55 * Pass requested state in r3: 71 * Pass requested state in r3:
56 * r3 - PNV_THREAD_NAP/SLEEP/WINKLE 72 * r3 - PNV_THREAD_NAP/SLEEP/WINKLE
57 * 73 *
@@ -150,6 +166,10 @@ power7_enter_nap_mode:
150 ld r14,PACA_CORE_IDLE_STATE_PTR(r13) 166 ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
151lwarx_loop1: 167lwarx_loop1:
152 lwarx r15,0,r14 168 lwarx r15,0,r14
169
170 andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
171 bnel core_idle_lock_held
172
153 andc r15,r15,r7 /* Clear thread bit */ 173 andc r15,r15,r7 /* Clear thread bit */
154 174
155 andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS 175 andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@ lwarx_loop2:
294 * workaround undo code or resyncing timebase or restoring context 314 * workaround undo code or resyncing timebase or restoring context
295 * In either case loop until the lock bit is cleared. 315 * In either case loop until the lock bit is cleared.
296 */ 316 */
297 bne core_idle_lock_held 317 bnel core_idle_lock_held
298 318
299 cmpwi cr2,r15,0 319 cmpwi cr2,r15,0
300 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13) 320 lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@ lwarx_loop2:
319 isync 339 isync
320 b common_exit 340 b common_exit
321 341
322core_idle_lock_held:
323 HMT_LOW
324core_idle_lock_loop:
325 lwz r15,0(14)
326 andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
327 bne core_idle_lock_loop
328 HMT_MEDIUM
329 b lwarx_loop2
330
331first_thread_in_subcore: 342first_thread_in_subcore:
332 /* First thread in subcore to wakeup */ 343 /* First thread in subcore to wakeup */
333 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT 344 ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
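
Note: core_idle_lock_held becomes a proper subroutine, entered with bnel and returning with blr, and it finishes by re-executing lwarx so the caller retries its update on a fresh value of core_idle_state (with a new reservation) instead of branching back to one fixed loop. The same idea sketched in C with illustrative names:

	#include <linux/compiler.h>
	#include <asm/cpuidle.h>

	/* Spin at low thread priority while another thread holds the
	 * per-core lock bit, then return a freshly loaded value so the
	 * caller can retry its atomic update. */
	static unsigned long example_wait_core_idle_lock(unsigned long *state)
	{
		while (READ_ONCE(*state) & PNV_CORE_IDLE_LOCK_BIT)
			cpu_relax();		/* HMT_LOW ... HMT_MEDIUM in asm */
		return READ_ONCE(*state);	/* like the trailing lwarx */
	}
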
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
966 966
967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) 967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
968{ 968{
969 memset(to, 0, sizeof *to);
970
971 if (copy_from_user(to, from, 3*sizeof(int)) || 969 if (copy_from_user(to, from, 3*sizeof(int)) ||
972 copy_from_user(to->_sifields._pad, 970 copy_from_user(to->_sifields._pad,
973 from->_sifields._pad, SI_PAD_SIZE32)) 971 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6530f1b8874d..37de90f8a845 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs)
297 297
298 __this_cpu_inc(irq_stat.mce_exceptions); 298 __this_cpu_inc(irq_stat.mce_exceptions);
299 299
300 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
301
300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 302 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
301 handled = cur_cpu_spec->machine_check_early(regs); 303 handled = cur_cpu_spec->machine_check_early(regs);
302 return handled; 304 return handled;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6d535973b200..a67c6d781c52 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
529 printk(KERN_ALERT "Unable to handle kernel paging request for " 529 printk(KERN_ALERT "Unable to handle kernel paging request for "
530 "instruction fetch\n"); 530 "instruction fetch\n");
531 break; 531 break;
532 case 0x600:
533 printk(KERN_ALERT "Unable to handle kernel paging request for "
534 "unaligned access at address 0x%08lx\n", regs->dar);
535 break;
532 default: 536 default:
533 printk(KERN_ALERT "Unable to handle kernel paging request for " 537 printk(KERN_ALERT "Unable to handle kernel paging request for "
534 "unknown fault\n"); 538 "unknown fault\n");
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index ec2eb20631d1..df956295c2a7 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
320 if (!attr) 320 if (!attr)
321 return NULL; 321 return NULL;
322 322
323 sysfs_attr_init(&attr->attr.attr);
324
323 attr->var = str; 325 attr->var = str;
324 attr->attr.attr.name = name; 326 attr->attr.attr.name = name;
325 attr->attr.attr.mode = 0444; 327 attr->attr.attr.mode = 0444;
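
Note: sysfs_attr_init() is required on dynamically allocated attributes so that, with lockdep enabled, each attribute gets a valid lock class key; statically declared attributes get this from their initializer macros. A generic sketch of the pattern, not the 24x7 structures themselves:

	#include <linux/kobject.h>
	#include <linux/slab.h>
	#include <linux/sysfs.h>

	/* Dynamically allocated attributes must be initialised with
	 * sysfs_attr_init() before they are registered with sysfs. */
	static struct kobj_attribute *example_alloc_attr(const char *name)
	{
		struct kobj_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

		if (!attr)
			return NULL;
		sysfs_attr_init(&attr->attr);
		attr->attr.name = name;
		attr->attr.mode = 0444;
		return attr;
	}
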
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
1478 } 1478 }
1479 1479
1480 /* Unmask the event */ 1480 /* Unmask the event */
1481 if (eeh_enabled()) 1481 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1482 enable_irq(eeh_event_irq); 1482 enable_irq(eeh_event_irq);
1483 1483
1484 return ret; 1484 return ret;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 4949ef0d9400..37f959bf392e 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
237 return elog; 237 return elog;
238} 238}
239 239
240static void elog_work_fn(struct work_struct *work) 240static irqreturn_t elog_event(int irq, void *data)
241{ 241{
242 __be64 size; 242 __be64 size;
243 __be64 id; 243 __be64 id;
@@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work)
251 rc = opal_get_elog_size(&id, &size, &type); 251 rc = opal_get_elog_size(&id, &size, &type);
252 if (rc != OPAL_SUCCESS) { 252 if (rc != OPAL_SUCCESS) {
253 pr_err("ELOG: OPAL log info read failed\n"); 253 pr_err("ELOG: OPAL log info read failed\n");
254 return; 254 return IRQ_HANDLED;
255 } 255 }
256 256
257 elog_size = be64_to_cpu(size); 257 elog_size = be64_to_cpu(size);
@@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work)
270 * entries. 270 * entries.
271 */ 271 */
272 if (kset_find_obj(elog_kset, name)) 272 if (kset_find_obj(elog_kset, name))
273 return; 273 return IRQ_HANDLED;
274 274
275 create_elog_obj(log_id, elog_size, elog_type); 275 create_elog_obj(log_id, elog_size, elog_type);
276}
277
278static DECLARE_WORK(elog_work, elog_work_fn);
279 276
280static irqreturn_t elog_event(int irq, void *data)
281{
282 schedule_work(&elog_work);
283 return IRQ_HANDLED; 277 return IRQ_HANDLED;
284} 278}
285 279
@@ -304,8 +298,8 @@ int __init opal_elog_init(void)
304 return irq; 298 return irq;
305 } 299 }
306 300
307 rc = request_irq(irq, elog_event, 301 rc = request_threaded_irq(irq, NULL, elog_event,
308 IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL); 302 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
309 if (rc) { 303 if (rc) {
310 pr_err("%s: Can't request OPAL event irq (%d)\n", 304 pr_err("%s: Can't request OPAL event irq (%d)\n",
311 __func__, rc); 305 __func__, rc);
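
Note: the workqueue deferral is dropped in favour of a threaded interrupt handler: elog_event() now runs in process context, so the firmware and kobject work it does may sleep, IRQF_ONESHOT keeps the level-triggered interrupt masked until the thread function returns, and the flags argument moves to the IRQF_* namespace. A generic threaded-IRQ registration sketch; the names are illustrative:

	#include <linux/interrupt.h>

	/* No hardirq handler is supplied, so the core installs a default
	 * one and runs my_thread_fn() in a kernel thread; IRQF_ONESHOT is
	 * mandatory in that case. */
	static irqreturn_t my_thread_fn(int irq, void *data)
	{
		/* may sleep here: allocate memory, take mutexes, call
		 * firmware, create sysfs objects, ... */
		return IRQ_HANDLED;
	}

	static int example_setup(unsigned int irq)
	{
		return request_threaded_irq(irq, NULL, my_thread_fn,
					    IRQF_ONESHOT, "example", NULL);
	}
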
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 46cb3feb0a13..4ece8e40dd54 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file)
112static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) 112static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
113{ 113{
114 size_t addr, size; 114 size_t addr, size;
115 pgprot_t page_prot;
115 int rc; 116 int rc;
116 117
117 pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n", 118 pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
@@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
125 if (!opal_prd_range_is_valid(addr, size)) 126 if (!opal_prd_range_is_valid(addr, size))
126 return -EINVAL; 127 return -EINVAL;
127 128
128 vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file, 129 page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
129 vma->vm_pgoff, 130 size, vma->vm_page_prot);
130 size, vma->vm_page_prot))
131 | _PAGE_SPECIAL);
132 131
133 rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, 132 rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
134 vma->vm_page_prot); 133 page_prot);
135 134
136 return rc; 135 return rc;
137} 136}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
2220 2220
2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, 2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2222 unsigned levels, unsigned long limit, 2222 unsigned levels, unsigned long limit,
2223 unsigned long *current_offset) 2223 unsigned long *current_offset, unsigned long *total_allocated)
2224{ 2224{
2225 struct page *tce_mem = NULL; 2225 struct page *tce_mem = NULL;
2226 __be64 *addr, *tmp; 2226 __be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2236 } 2236 }
2237 addr = page_address(tce_mem); 2237 addr = page_address(tce_mem);
2238 memset(addr, 0, allocated); 2238 memset(addr, 0, allocated);
2239 *total_allocated += allocated;
2239 2240
2240 --levels; 2241 --levels;
2241 if (!levels) { 2242 if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2245 2246
2246 for (i = 0; i < entries; ++i) { 2247 for (i = 0; i < entries; ++i) {
2247 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, 2248 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
2248 levels, limit, current_offset); 2249 levels, limit, current_offset, total_allocated);
2249 if (!tmp) 2250 if (!tmp)
2250 break; 2251 break;
2251 2252
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2267 struct iommu_table *tbl) 2268 struct iommu_table *tbl)
2268{ 2269{
2269 void *addr; 2270 void *addr;
2270 unsigned long offset = 0, level_shift; 2271 unsigned long offset = 0, level_shift, total_allocated = 0;
2271 const unsigned window_shift = ilog2(window_size); 2272 const unsigned window_shift = ilog2(window_size);
2272 unsigned entries_shift = window_shift - page_shift; 2273 unsigned entries_shift = window_shift - page_shift;
2273 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); 2274 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2286 2287
2287 /* Allocate TCE table */ 2288 /* Allocate TCE table */
2288 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2289 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2289 levels, tce_table_size, &offset); 2290 levels, tce_table_size, &offset, &total_allocated);
2290 2291
2291 /* addr==NULL means that the first level allocation failed */ 2292 /* addr==NULL means that the first level allocation failed */
2292 if (!addr) 2293 if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2308 page_shift); 2309 page_shift);
2309 tbl->it_level_size = 1ULL << (level_shift - 3); 2310 tbl->it_level_size = 1ULL << (level_shift - 3);
2310 tbl->it_indirect_levels = levels - 1; 2311 tbl->it_indirect_levels = levels - 1;
2311 tbl->it_allocated_size = offset; 2312 tbl->it_allocated_size = total_allocated;
2312 2313
2313 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", 2314 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2314 window_size, tce_table_size, bus_offset); 2315 window_size, tce_table_size, bus_offset);
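The accounting fix above separates two counters: the running offset only describes entries written at the leaf level, while the new total_allocated accumulates every level of the multi-level TCE table, which is what it_allocated_size should report. A hypothetical sketch of that accumulation pattern (demo_alloc_levels() and the single-slot linking are illustrative only):

#include <linux/slab.h>

static void *demo_alloc_levels(int nid, unsigned int levels,
                               size_t level_bytes,
                               unsigned long *total_allocated)
{
        void *level = kzalloc_node(level_bytes, GFP_KERNEL, nid);

        if (!level)
                return NULL;
        *total_allocated += level_bytes;        /* count every level */

        if (levels > 1) {
                void **slot = level;            /* link one child for brevity */

                *slot = demo_alloc_levels(nid, levels - 1, level_bytes,
                                          total_allocated);
        }
        return level;
}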
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 2bc33674ebfc..87f9623ca805 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -18,6 +18,7 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/semaphore.h> 19#include <linux/semaphore.h>
20#include <asm/msi_bitmap.h> 20#include <asm/msi_bitmap.h>
21#include <asm/ppc-pci.h>
21 22
22struct ppc4xx_hsta_msi { 23struct ppc4xx_hsta_msi {
23 struct device *dev; 24 struct device *dev;
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index dc5385ebb071..5ad26dd94d77 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -3,5 +3,6 @@
3generic-y += clkdev.h 3generic-y += clkdev.h
4generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += mcs_spinlock.h 5generic-y += mcs_spinlock.h
6generic-y += mm-arch-hooks.h
6generic-y += preempt.h 7generic-y += preempt.h
7generic-y += trace_clock.h 8generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index cfad7fca01d6..d7697ab802f6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -57,7 +57,10 @@ union ctlreg0 {
57 unsigned long lap : 1; /* Low-address-protection control */ 57 unsigned long lap : 1; /* Low-address-protection control */
58 unsigned long : 4; 58 unsigned long : 4;
59 unsigned long edat : 1; /* Enhanced-DAT-enablement control */ 59 unsigned long edat : 1; /* Enhanced-DAT-enablement control */
60 unsigned long : 23; 60 unsigned long : 4;
61 unsigned long afp : 1; /* AFP-register control */
62 unsigned long vx : 1; /* Vector enablement control */
63 unsigned long : 17;
61 }; 64 };
62}; 65};
63 66
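The new afp and vx bits are consumed by the machine-check code later in this series (see the nmi.c hunk below). A small sketch of that usage, assuming the usual ctl_reg helpers; demo_enable_vx_for_restore() is an illustrative name:

#include <asm/ctl_reg.h>
#include <asm/lowcore.h>

static void demo_enable_vx_for_restore(void)
{
        union ctlreg0 cr0;

        cr0.val = S390_lowcore.cregs_save_area[0];
        cr0.afp = cr0.vx = 1;           /* enable AFP + vector controls */
        __ctl_load(cr0.val, 0, 0);
        /* ... restore vector registers here ... */
        __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}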
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 0130d0379edd..d9be7c0c1291 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -14,6 +14,7 @@
14 14
15#define is_hugepage_only_range(mm, addr, len) 0 15#define is_hugepage_only_range(mm, addr, len) 0
16#define hugetlb_free_pgd_range free_pgd_range 16#define hugetlb_free_pgd_range free_pgd_range
17#define hugepages_supported() (MACHINE_HAS_HPAGE)
17 18
18void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 19void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
19 pte_t *ptep, pte_t pte); 20 pte_t *ptep, pte_t pte);
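For context, generic hugetlb setup probes this hook before doing any work, roughly as sketched below; demo_hugetlb_init() is a stand-in for the real mm/hugetlb.c init path, so defining hugepages_supported() as MACHINE_HAS_HPAGE lets s390 report huge-page support at run time.

static int demo_hugetlb_init(void)
{
        if (!hugepages_supported())
                return 0;       /* machine has no huge pages; skip setup */
        /* ... register hstates, sysfs/sysctl entries, etc. ... */
        return 0;
}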
diff --git a/arch/s390/include/asm/mm-arch-hooks.h b/arch/s390/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 07680b2f3c59..000000000000
--- a/arch/s390/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_S390_MM_ARCH_HOOKS_H
13#define _ASM_S390_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_S390_MM_ARCH_HOOKS_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index dd345238d9a7..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -17,10 +17,7 @@
17#define PAGE_DEFAULT_ACC 0 17#define PAGE_DEFAULT_ACC 0
18#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) 18#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
19 19
20#include <asm/setup.h> 20#define HPAGE_SHIFT 20
21#ifndef __ASSEMBLY__
22
23extern int HPAGE_SHIFT;
24#define HPAGE_SIZE (1UL << HPAGE_SHIFT) 21#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
25#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 22#define HPAGE_MASK (~(HPAGE_SIZE - 1))
26#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 23#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
@@ -30,6 +27,9 @@ extern int HPAGE_SHIFT;
30#define ARCH_HAS_PREPARE_HUGEPAGE 27#define ARCH_HAS_PREPARE_HUGEPAGE
31#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH 28#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
32 29
30#include <asm/setup.h>
31#ifndef __ASSEMBLY__
32
33static inline void storage_key_init_range(unsigned long start, unsigned long end) 33static inline void storage_key_init_range(unsigned long start, unsigned long end)
34{ 34{
35#if PAGE_DEFAULT_KEY 35#if PAGE_DEFAULT_KEY
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 4cb19fe76dd9..f897ec73dc8c 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -87,7 +87,15 @@ struct sf_raw_sample {
87} __packed; 87} __packed;
88 88
89/* Perf hardware reserve and release functions */ 89/* Perf hardware reserve and release functions */
90#ifdef CONFIG_PERF_EVENTS
90int perf_reserve_sampling(void); 91int perf_reserve_sampling(void);
91void perf_release_sampling(void); 92void perf_release_sampling(void);
93#else /* CONFIG_PERF_EVENTS */
94static inline int perf_reserve_sampling(void)
95{
96 return 0;
97}
98static inline void perf_release_sampling(void) {}
99#endif /* CONFIG_PERF_EVENTS */
92 100
93#endif /* _ASM_S390_PERF_EVENT_H */ 101#endif /* _ASM_S390_PERF_EVENT_H */
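With the !CONFIG_PERF_EVENTS stubs in place, callers such as the oprofile init path (which gains the <asm/perf_event.h> include further down) can be written without any #ifdef of their own. A hypothetical caller:

static int demo_reserve(void)
{
        int rc = perf_reserve_sampling();       /* 0 when CONFIG_PERF_EVENTS=n */

        if (rc)
                return rc;
        /* ... use the CPU-measurement sampling facility ... */
        perf_release_sampling();
        return 0;
}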
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
23 23
24int main(void) 24int main(void)
25{ 25{
26 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 26 DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
27 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 27 DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
28 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
29 BLANK();
30 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
31 BLANK(); 29 BLANK();
32 DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); 30 DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
33 DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); 31 DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
34 DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); 32 DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
33 DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
34 DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
35 BLANK(); 35 BLANK();
36 DEFINE(__TI_task, offsetof(struct thread_info, task)); 36 DEFINE(__TI_task, offsetof(struct thread_info, task));
37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
179 DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
180 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 179 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
181 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 180 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
182 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 181 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index bff5e3b6d822..8ba32436effe 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
138 union cache_topology ct; 138 union cache_topology ct;
139 enum cache_type ctype; 139 enum cache_type ctype;
140 140
141 if (!test_facility(34))
142 return -EOPNOTSUPP;
141 if (!this_cpu_ci) 143 if (!this_cpu_ci)
142 return -EINVAL; 144 return -EINVAL;
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 145 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
178 */ 178 */
179ENTRY(__switch_to) 179ENTRY(__switch_to)
180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
181 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev 181 lgr %r1,%r2
182 lg %r4,__THREAD_info(%r2) # get thread_info of prev 182 aghi %r1,__TASK_thread # thread_struct of prev task
183 lg %r5,__THREAD_info(%r3) # get thread_info of next 183 lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
184 lg %r5,__TASK_thread_info(%r3) # get thread_info of next
185 stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
186 lgr %r1,%r3
187 aghi %r1,__TASK_thread # thread_struct of next task
184 lgr %r15,%r5 188 lgr %r15,%r5
185 aghi %r15,STACK_INIT # end of kernel stack of next 189 aghi %r15,STACK_INIT # end of kernel stack of next
186 stg %r3,__LC_CURRENT # store task struct of next 190 stg %r3,__LC_CURRENT # store task struct of next
187 stg %r5,__LC_THREAD_INFO # store thread info of next 191 stg %r5,__LC_THREAD_INFO # store thread info of next
188 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 192 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
193 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
189 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 194 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
190 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 195 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
191 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 196 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 br %r14 197 br %r14
194 198
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
417 LAST_BREAK %r14 421 LAST_BREAK %r14
418 lg %r15,__LC_KERNEL_STACK 422 lg %r15,__LC_KERNEL_STACK
419 lg %r14,__TI_task(%r12) 423 lg %r14,__TI_task(%r12)
424 aghi %r14,__TASK_thread # pointer to thread_struct
420 lghi %r13,__LC_PGM_TDB 425 lghi %r13,__LC_PGM_TDB
421 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 426 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
422 jz 2f 427 jz 2f
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 505c17c0ae1a..56b550893593 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,6 +21,7 @@
21#include <asm/nmi.h> 21#include <asm/nmi.h>
22#include <asm/crw.h> 22#include <asm/crw.h>
23#include <asm/switch_to.h> 23#include <asm/switch_to.h>
24#include <asm/ctl_reg.h>
24 25
25struct mcck_struct { 26struct mcck_struct {
26 int kill_task; 27 int kill_task;
@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
129 } else 130 } else
130 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); 131 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
131 132
132 asm volatile( 133 if (!MACHINE_HAS_VX) {
133 " ld 0,0(%0)\n" 134 /* Revalidate floating point registers */
134 " ld 1,8(%0)\n" 135 asm volatile(
135 " ld 2,16(%0)\n" 136 " ld 0,0(%0)\n"
136 " ld 3,24(%0)\n" 137 " ld 1,8(%0)\n"
137 " ld 4,32(%0)\n" 138 " ld 2,16(%0)\n"
138 " ld 5,40(%0)\n" 139 " ld 3,24(%0)\n"
139 " ld 6,48(%0)\n" 140 " ld 4,32(%0)\n"
140 " ld 7,56(%0)\n" 141 " ld 5,40(%0)\n"
141 " ld 8,64(%0)\n" 142 " ld 6,48(%0)\n"
142 " ld 9,72(%0)\n" 143 " ld 7,56(%0)\n"
143 " ld 10,80(%0)\n" 144 " ld 8,64(%0)\n"
144 " ld 11,88(%0)\n" 145 " ld 9,72(%0)\n"
145 " ld 12,96(%0)\n" 146 " ld 10,80(%0)\n"
146 " ld 13,104(%0)\n" 147 " ld 11,88(%0)\n"
147 " ld 14,112(%0)\n" 148 " ld 12,96(%0)\n"
148 " ld 15,120(%0)\n" 149 " ld 13,104(%0)\n"
149 : : "a" (fpt_save_area)); 150 " ld 14,112(%0)\n"
150 /* Revalidate vector registers */ 151 " ld 15,120(%0)\n"
151 if (MACHINE_HAS_VX && current->thread.vxrs) { 152 : : "a" (fpt_save_area));
153 } else {
154 /* Revalidate vector registers */
155 union ctlreg0 cr0;
156
152 if (!mci->vr) { 157 if (!mci->vr) {
153 /* 158 /*
154 * Vector registers can't be restored and therefore 159 * Vector registers can't be restored and therefore
@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
156 */ 161 */
157 kill_task = 1; 162 kill_task = 1;
158 } 163 }
164 cr0.val = S390_lowcore.cregs_save_area[0];
165 cr0.afp = cr0.vx = 1;
166 __ctl_load(cr0.val, 0, 0);
159 restore_vx_regs((__vector128 *) 167 restore_vx_regs((__vector128 *)
160 S390_lowcore.vector_save_area_addr); 168 &S390_lowcore.vector_save_area);
169 __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
161 } 170 }
162 /* Revalidate access registers */ 171 /* Revalidate access registers */
163 asm volatile( 172 asm volatile(
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index dc5edc29b73a..8f587d871b9f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
163asmlinkage void execve_tail(void) 163asmlinkage void execve_tail(void)
164{ 164{
165 current->thread.fp_regs.fpc = 0; 165 current->thread.fp_regs.fpc = 0;
166 asm volatile("sfpc %0,%0" : : "d" (0)); 166 asm volatile("sfpc %0" : : "d" (0));
167} 167}
168 168
169/* 169/*
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 43c3169ea49c..ada0c07fe1a8 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
270 jno .Lesa2 270 jno .Lesa2
271 ahi %r15,-80 271 ahi %r15,-80
272 stmh %r6,%r15,96(%r15) # store upper register halves 272 stmh %r6,%r15,96(%r15) # store upper register halves
273 basr %r13,0
274 lmh %r0,%r15,.Lzeroes-.(%r13) # clear upper register halves
273.Lesa2: 275.Lesa2:
274 lr %r10,%r2 # save string pointer 276 lr %r10,%r2 # save string pointer
275 lhi %r2,0 277 lhi %r2,0
@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
291.Lesa3: 293.Lesa3:
292 lm %r6,%r15,120(%r15) # restore registers 294 lm %r6,%r15,120(%r15) # restore registers
293 br %r14 295 br %r14
296.Lzeroes:
297 .fill 64,4,0
294 298
295.LwritedataS4: 299.LwritedataS4:
296 .long 0x00760005 # SCLP command for write data 300 .long 0x00760005 # SCLP command for write data
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f7f027caaaaa..ca070d260af2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -885,8 +885,6 @@ void __init setup_arch(char **cmdline_p)
885 */ 885 */
886 setup_hwcaps(); 886 setup_hwcaps();
887 887
888 HPAGE_SHIFT = MACHINE_HAS_HPAGE ? 20 : 0;
889
890 /* 888 /*
891 * Create kernel page tables and switch to virtual addressing. 889 * Create kernel page tables and switch to virtual addressing.
892 */ 890 */
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
259 } 259 }
260 260
261 /* get vector interrupt code from fpc */ 261 /* get vector interrupt code from fpc */
262 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 262 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; 263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
264 switch (vic) { 264 switch (vic) {
265 case 1: /* invalid vector operation */ 265 case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
297 297
298 location = get_trap_ip(regs); 298 location = get_trap_ip(regs);
299 299
300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 300 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
301 /* Check for vector register enablement */ 301 /* Check for vector register enablement */
302 if (MACHINE_HAS_VX && !current->thread.vxrs && 302 if (MACHINE_HAS_VX && !current->thread.vxrs &&
303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
1742 1742
1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1744{ 1744{
1745 if (!vcpu->requests)
1746 return 0;
1747retry: 1745retry:
1748 kvm_s390_vcpu_request_handled(vcpu); 1746 kvm_s390_vcpu_request_handled(vcpu);
1747 if (!vcpu->requests)
1748 return 0;
1749 /* 1749 /*
1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the 1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 33082d0d101b..b33f66110ca9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -31,8 +31,6 @@
31#define ALLOC_ORDER 2 31#define ALLOC_ORDER 2
32#define FRAG_MASK 0x03 32#define FRAG_MASK 0x03
33 33
34int HPAGE_SHIFT;
35
36unsigned long *crst_table_alloc(struct mm_struct *mm) 34unsigned long *crst_table_alloc(struct mm_struct *mm)
37{ 35{
38 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); 36 struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fee782acc2ee..8d2e5165865f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, 448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
449 BPF_REG_1, offsetof(struct sk_buff, data)); 449 BPF_REG_1, offsetof(struct sk_buff, data));
450 } 450 }
451 /* BPF compatibility: clear A (%b7) and X (%b8) registers */ 451 /* BPF compatibility: clear A (%b0) and X (%b7) registers */
452 if (REG_SEEN(BPF_REG_7)) 452 if (REG_SEEN(BPF_REG_A))
453 /* lghi %b7,0 */ 453 /* lghi %ba,0 */
454 EMIT4_IMM(0xa7090000, BPF_REG_7, 0); 454 EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
455 if (REG_SEEN(BPF_REG_8)) 455 if (REG_SEEN(BPF_REG_X))
456 /* lghi %b8,0 */ 456 /* lghi %bx,0 */
457 EMIT4_IMM(0xa7090000, BPF_REG_8, 0); 457 EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
458} 458}
459 459
460/* 460/*
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index bc927a09a172..9cfa2ffaa9d6 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -16,6 +16,7 @@
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/perf_event.h>
19 20
20#include "../../../drivers/oprofile/oprof.h" 21#include "../../../drivers/oprofile/oprof.h"
21 22
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 138fb3db45ba..92ffe397b893 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += clkdev.h
7generic-y += cputime.h 7generic-y += cputime.h
8generic-y += irq_work.h 8generic-y += irq_work.h
9generic-y += mcs_spinlock.h 9generic-y += mcs_spinlock.h
10generic-y += mm-arch-hooks.h
10generic-y += preempt.h 11generic-y += preempt.h
11generic-y += sections.h 12generic-y += sections.h
12generic-y += trace_clock.h 13generic-y += trace_clock.h
diff --git a/arch/score/include/asm/mm-arch-hooks.h b/arch/score/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 5e38689f189a..000000000000
--- a/arch/score/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_SCORE_MM_ARCH_HOOKS_H
13#define _ASM_SCORE_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_SCORE_MM_ARCH_HOOKS_H */
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 9ac4626e7284..aac452b26aa8 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += kvm_para.h
16generic-y += local.h 16generic-y += local.h
17generic-y += local64.h 17generic-y += local64.h
18generic-y += mcs_spinlock.h 18generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h
19generic-y += mman.h 20generic-y += mman.h
20generic-y += msgbuf.h 21generic-y += msgbuf.h
21generic-y += param.h 22generic-y += param.h
diff --git a/arch/sh/include/asm/mm-arch-hooks.h b/arch/sh/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 18087298b728..000000000000
--- a/arch/sh/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_SH_MM_ARCH_HOOKS_H
13#define _ASM_SH_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_SH_MM_ARCH_HOOKS_H */
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2b2a69dcc467..e928618838bc 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -12,6 +12,7 @@ generic-y += linkage.h
12generic-y += local.h 12generic-y += local.h
13generic-y += local64.h 13generic-y += local64.h
14generic-y += mcs_spinlock.h 14generic-y += mcs_spinlock.h
15generic-y += mm-arch-hooks.h
15generic-y += module.h 16generic-y += module.h
16generic-y += mutex.h 17generic-y += mutex.h
17generic-y += preempt.h 18generic-y += preempt.h
diff --git a/arch/sparc/include/asm/mm-arch-hooks.h b/arch/sparc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index b89ba44c16f1..000000000000
--- a/arch/sparc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_SPARC_MM_ARCH_HOOKS_H
13#define _ASM_SPARC_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_SPARC_MM_ARCH_HOOKS_H */
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */ 28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */
29 29
30#define VISEntryHalf \ 30#define VISEntryHalf \
31 rd %fprs, %o5; \ 31 VISEntry
32 andcc %o5, FPRS_FEF, %g0; \ 32
33 be,pt %icc, 297f; \ 33#define VISExitHalf \
34 sethi %hi(298f), %g7; \ 34 VISExit
35 sethi %hi(VISenterhalf), %g1; \
36 jmpl %g1 + %lo(VISenterhalf), %g0; \
37 or %g7, %lo(298f), %g7; \
38 clr %o5; \
39297: wr %o5, FPRS_FEF, %fprs; \
40298:
41 35
42#define VISEntryHalfFast(fail_label) \ 36#define VISEntryHalfFast(fail_label) \
43 rd %fprs, %o5; \ 37 rd %fprs, %o5; \
@@ -47,7 +41,7 @@
47 ba,a,pt %xcc, fail_label; \ 41 ba,a,pt %xcc, fail_label; \
48297: wr %o5, FPRS_FEF, %fprs; 42297: wr %o5, FPRS_FEF, %fprs;
49 43
50#define VISExitHalf \ 44#define VISExitHalfFast \
51 wr %o5, 0, %fprs; 45 wr %o5, 0, %fprs;
52 46
53#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
240 add %o0, 0x40, %o0 240 add %o0, 0x40, %o0
241 bne,pt %icc, 1b 241 bne,pt %icc, 1b
242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
243#ifdef NON_USER_COPY
244 VISExitHalfFast
245#else
243 VISExitHalf 246 VISExitHalf
244 247#endif
245 brz,pn %o2, .Lexit 248 brz,pn %o2, .Lexit
246 cmp %o2, 19 249 cmp %o2, 19
247 ble,pn %icc, .Lsmall_unaligned 250 ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
44 44
45 stx %g3, [%g6 + TI_GSR] 45 stx %g3, [%g6 + TI_GSR]
462: add %g6, %g1, %g3 462: add %g6, %g1, %g3
47 cmp %o5, FPRS_DU 47 mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
48 be,pn %icc, 6f 48 sll %g1, 3, %g1
49 sll %g1, 3, %g1
50 stb %o5, [%g3 + TI_FPSAVED] 49 stb %o5, [%g3 + TI_FPSAVED]
51 rd %gsr, %g2 50 rd %gsr, %g2
52 add %g6, %g1, %g3 51 add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80 .align 32 79 .align 32
8180: jmpl %g7 + %g0, %g0 8080: jmpl %g7 + %g0, %g0
82 nop 81 nop
83
846: ldub [%g3 + TI_FPSAVED], %o5
85 or %o5, FPRS_DU, %o5
86 add %g6, TI_FPREGS+0x80, %g2
87 stb %o5, [%g3 + TI_FPSAVED]
88
89 sll %g1, 5, %g1
90 add %g6, TI_FPREGS+0xc0, %g3
91 wr %g0, FPRS_FEF, %fprs
92 membar #Sync
93 stda %f32, [%g2 + %g1] ASI_BLK_P
94 stda %f48, [%g3 + %g1] ASI_BLK_P
95 membar #Sync
96 ba,pt %xcc, 80f
97 nop
98
99 .align 32
10080: jmpl %g7 + %g0, %g0
101 nop
102
103 .align 32
104VISenterhalf:
105 ldub [%g6 + TI_FPDEPTH], %g1
106 brnz,a,pn %g1, 1f
107 cmp %g1, 1
108 stb %g0, [%g6 + TI_FPSAVED]
109 stx %fsr, [%g6 + TI_XFSR]
110 clr %o5
111 jmpl %g7 + %g0, %g0
112 wr %g0, FPRS_FEF, %fprs
113
1141: bne,pn %icc, 2f
115 srl %g1, 1, %g1
116 ba,pt %xcc, vis1
117 sub %g7, 8, %g7
1182: addcc %g6, %g1, %g3
119 sll %g1, 3, %g1
120 andn %o5, FPRS_DU, %g2
121 stb %g2, [%g3 + TI_FPSAVED]
122
123 rd %gsr, %g2
124 add %g6, %g1, %g3
125 stx %g2, [%g3 + TI_GSR]
126 add %g6, %g1, %g2
127 stx %fsr, [%g2 + TI_XFSR]
128 sll %g1, 5, %g1
1293: andcc %o5, FPRS_DL, %g0
130 be,pn %icc, 4f
131 add %g6, TI_FPREGS, %g2
132
133 add %g6, TI_FPREGS+0x40, %g3
134 membar #Sync
135 stda %f0, [%g2 + %g1] ASI_BLK_P
136 stda %f16, [%g3 + %g1] ASI_BLK_P
137 membar #Sync
138 ba,pt %xcc, 4f
139 nop
140
141 .align 32
1424: and %o5, FPRS_DU, %o5
143 jmpl %g7 + %g0, %g0
144 wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
135void VISenter(void); 135void VISenter(void);
136EXPORT_SYMBOL(VISenter); 136EXPORT_SYMBOL(VISenter);
137 137
138/* CRYPTO code needs this */
139void VISenterhalf(void);
140EXPORT_SYMBOL(VISenterhalf);
141
142extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 138extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
143extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 139extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
144 unsigned long *); 140 unsigned long *);
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d53654488c2c..d8a843163471 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_regs.h
19generic-y += local.h 19generic-y += local.h
20generic-y += local64.h 20generic-y += local64.h
21generic-y += mcs_spinlock.h 21generic-y += mcs_spinlock.h
22generic-y += mm-arch-hooks.h
22generic-y += msgbuf.h 23generic-y += msgbuf.h
23generic-y += mutex.h 24generic-y += mutex.h
24generic-y += param.h 25generic-y += param.h
diff --git a/arch/tile/include/asm/mm-arch-hooks.h b/arch/tile/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d1709ea774f7..000000000000
--- a/arch/tile/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_TILE_MM_ARCH_HOOKS_H
13#define _ASM_TILE_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_TILE_MM_ARCH_HOOKS_H */
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) 113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
114 return -EFAULT; 114 return -EFAULT;
115 115
116 memset(to, 0, sizeof(*to));
117
118 err = __get_user(to->si_signo, &from->si_signo); 116 err = __get_user(to->si_signo, &from->si_signo);
119 err |= __get_user(to->si_errno, &from->si_errno); 117 err |= __get_user(to->si_errno, &from->si_errno);
120 err |= __get_user(to->si_code, &from->si_code); 118 err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
1139 1139
1140void __init free_initrd_mem(unsigned long begin, unsigned long end) 1140void __init free_initrd_mem(unsigned long begin, unsigned long end)
1141{ 1141{
1142 free_bootmem(__pa(begin), end - begin); 1142 free_bootmem_late(__pa(begin), end - begin);
1143} 1143}
1144 1144
1145static int __init setup_initrd(char *str) 1145static int __init setup_initrd(char *str)
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 88c7016492c4..97bbb6060b25 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -28,7 +28,7 @@
28#define _ST(p, inst, v) \ 28#define _ST(p, inst, v) \
29 ({ \ 29 ({ \
30 asm("1: " #inst " %0, %1;" \ 30 asm("1: " #inst " %0, %1;" \
31 ".pushsection .coldtext.memcpy,\"ax\";" \ 31 ".pushsection .coldtext,\"ax\";" \
32 "2: { move r0, %2; jrp lr };" \ 32 "2: { move r0, %2; jrp lr };" \
33 ".section __ex_table,\"a\";" \ 33 ".section __ex_table,\"a\";" \
34 ".align 8;" \ 34 ".align 8;" \
@@ -41,7 +41,7 @@
41 ({ \ 41 ({ \
42 unsigned long __v; \ 42 unsigned long __v; \
43 asm("1: " #inst " %0, %1;" \ 43 asm("1: " #inst " %0, %1;" \
44 ".pushsection .coldtext.memcpy,\"ax\";" \ 44 ".pushsection .coldtext,\"ax\";" \
45 "2: { move r0, %2; jrp lr };" \ 45 "2: { move r0, %2; jrp lr };" \
46 ".section __ex_table,\"a\";" \ 46 ".section __ex_table,\"a\";" \
47 ".align 8;" \ 47 ".align 8;" \
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 3d63ff6f583f..149ec55f9c46 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += irq_regs.h
16generic-y += irq_work.h 16generic-y += irq_work.h
17generic-y += kdebug.h 17generic-y += kdebug.h
18generic-y += mcs_spinlock.h 18generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h
19generic-y += mutex.h 20generic-y += mutex.h
20generic-y += param.h 21generic-y += param.h
21generic-y += pci.h 22generic-y += pci.h
diff --git a/arch/um/include/asm/mm-arch-hooks.h b/arch/um/include/asm/mm-arch-hooks.h
deleted file mode 100644
index a7c8b0dfdd4e..000000000000
--- a/arch/um/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_UM_MM_ARCH_HOOKS_H
13#define _ASM_UM_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_UM_MM_ARCH_HOOKS_H */
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index d12b377b5a8b..1fc7a286dc6f 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -26,6 +26,7 @@ generic-y += kdebug.h
26generic-y += kmap_types.h 26generic-y += kmap_types.h
27generic-y += local.h 27generic-y += local.h
28generic-y += mcs_spinlock.h 28generic-y += mcs_spinlock.h
29generic-y += mm-arch-hooks.h
29generic-y += mman.h 30generic-y += mman.h
30generic-y += module.h 31generic-y += module.h
31generic-y += msgbuf.h 32generic-y += msgbuf.h
diff --git a/arch/unicore32/include/asm/mm-arch-hooks.h b/arch/unicore32/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 4d79a850c509..000000000000
--- a/arch/unicore32/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_UNICORE32_MM_ARCH_HOOKS_H
13#define _ASM_UNICORE32_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_UNICORE32_MM_ARCH_HOOKS_H */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 55bced17dc95..b3a1a5d77d92 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -41,6 +41,7 @@ config X86
41 select ARCH_USE_CMPXCHG_LOCKREF if X86_64 41 select ARCH_USE_CMPXCHG_LOCKREF if X86_64
42 select ARCH_USE_QUEUED_RWLOCKS 42 select ARCH_USE_QUEUED_RWLOCKS
43 select ARCH_USE_QUEUED_SPINLOCKS 43 select ARCH_USE_QUEUED_SPINLOCKS
44 select ARCH_WANTS_DYNAMIC_TASK_STRUCT
44 select ARCH_WANT_FRAME_POINTERS 45 select ARCH_WANT_FRAME_POINTERS
45 select ARCH_WANT_IPC_PARSE_VERSION if X86_32 46 select ARCH_WANT_IPC_PARSE_VERSION if X86_32
46 select ARCH_WANT_OPTIONAL_GPIOLIB 47 select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -254,6 +255,11 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
254config ARCH_SUPPORTS_DEBUG_PAGEALLOC 255config ARCH_SUPPORTS_DEBUG_PAGEALLOC
255 def_bool y 256 def_bool y
256 257
258config KASAN_SHADOW_OFFSET
259 hex
260 depends on KASAN
261 default 0xdffffc0000000000
262
257config HAVE_INTEL_TXT 263config HAVE_INTEL_TXT
258 def_bool y 264 def_bool y
259 depends on INTEL_IOMMU && ACPI 265 depends on INTEL_IOMMU && ACPI
@@ -2015,7 +2021,7 @@ config CMDLINE_BOOL
2015 2021
2016 To compile command line arguments into the kernel, 2022 To compile command line arguments into the kernel,
2017 set this option to 'Y', then fill in the 2023 set this option to 'Y', then fill in the
2018 the boot arguments in CONFIG_CMDLINE. 2024 boot arguments in CONFIG_CMDLINE.
2019 2025
2020 Systems with fully functional boot loaders (i.e. non-embedded) 2026 Systems with fully functional boot loaders (i.e. non-embedded)
2021 should leave this option set to 'N'. 2027 should leave this option set to 'N'.
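Background for the new KASAN_SHADOW_OFFSET constant, not taken from the patch itself: x86 KASAN maps every 8 bytes of kernel address space onto one shadow byte, so the offset enters the address translation roughly as in the sketch below. demo_shadow() is illustrative, not the kernel's kasan_mem_to_shadow().

static inline unsigned long demo_shadow(unsigned long addr)
{
        /* one shadow byte per 8 bytes of address space, then rebase */
        return (addr >> 3) + 0xdffffc0000000000UL;
}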
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index a15893d17c55..d8c0d3266173 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -297,6 +297,18 @@ config OPTIMIZE_INLINING
297 297
298 If unsure, say N. 298 If unsure, say N.
299 299
300config DEBUG_ENTRY
301 bool "Debug low-level entry code"
302 depends on DEBUG_KERNEL
303 ---help---
304 This option enables sanity checks in x86's low-level entry code.
305 Some of these sanity checks may slow down kernel entries and
306 exits or otherwise impact performance.
307
308 This is currently used to help test NMI code.
309
310 If unsure, say N.
311
300config DEBUG_NMI_SELFTEST 312config DEBUG_NMI_SELFTEST
301 bool "NMI Selftest" 313 bool "NMI Selftest"
302 depends on DEBUG_KERNEL && X86_LOCAL_APIC 314 depends on DEBUG_KERNEL && X86_LOCAL_APIC
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
1193 unsigned int e820_type = 0; 1193 unsigned int e820_type = 0;
1194 unsigned long m = efi->efi_memmap; 1194 unsigned long m = efi->efi_memmap;
1195 1195
1196#ifdef CONFIG_X86_64
1197 m |= (u64)efi->efi_memmap_hi << 32;
1198#endif
1199
1196 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); 1200 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
1197 switch (d->type) { 1201 switch (d->type) {
1198 case EFI_RESERVED_TYPE: 1202 case EFI_RESERVED_TYPE:
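The eboot fix above, shown on its own: in a 64-bit EFI environment the memory-map pointer is split across two 32-bit boot_params fields, so the high half must be merged in before the descriptors are walked. demo_memmap_addr() is an illustrative wrapper around the same arithmetic.

#include <linux/types.h>

static unsigned long demo_memmap_addr(struct efi_info *efi)
{
        unsigned long m = efi->efi_memmap;

#ifdef CONFIG_X86_64
        m |= (u64)efi->efi_memmap_hi << 32;     /* upper 32 bits of the map */
#endif
        return m;
}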
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3bb2c4302df1..8cb3e438f21e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1237,11 +1237,12 @@ ENTRY(nmi)
1237 * If the variable is not set and the stack is not the NMI 1237 * If the variable is not set and the stack is not the NMI
1238 * stack then: 1238 * stack then:
1239 * o Set the special variable on the stack 1239 * o Set the special variable on the stack
1240 * o Copy the interrupt frame into a "saved" location on the stack 1240 * o Copy the interrupt frame into an "outermost" location on the
1241 * o Copy the interrupt frame into a "copy" location on the stack 1241 * stack
1242 * o Copy the interrupt frame into an "iret" location on the stack
1242 * o Continue processing the NMI 1243 * o Continue processing the NMI
1243 * If the variable is set or the previous stack is the NMI stack: 1244 * If the variable is set or the previous stack is the NMI stack:
1244 * o Modify the "copy" location to jump to the repeate_nmi 1245 * o Modify the "iret" location to jump to the repeat_nmi
1245 * o return back to the first NMI 1246 * o return back to the first NMI
1246 * 1247 *
1247 * Now on exit of the first NMI, we first clear the stack variable 1248 * Now on exit of the first NMI, we first clear the stack variable
@@ -1250,31 +1251,151 @@ ENTRY(nmi)
1250 * a nested NMI that updated the copy interrupt stack frame, a 1251 * a nested NMI that updated the copy interrupt stack frame, a
1251 * jump will be made to the repeat_nmi code that will handle the second 1252 * jump will be made to the repeat_nmi code that will handle the second
1252 * NMI. 1253 * NMI.
1254 *
1255 * However, espfix prevents us from directly returning to userspace
1256 * with a single IRET instruction. Similarly, IRET to user mode
1257 * can fault. We therefore handle NMIs from user space like
1258 * other IST entries.
1253 */ 1259 */
1254 1260
1255 /* Use %rdx as our temp variable throughout */ 1261 /* Use %rdx as our temp variable throughout */
1256 pushq %rdx 1262 pushq %rdx
1257 1263
1264 testb $3, CS-RIP+8(%rsp)
1265 jz .Lnmi_from_kernel
1266
1267 /*
1268 * NMI from user mode. We need to run on the thread stack, but we
1269 * can't go through the normal entry paths: NMIs are masked, and
1270 * we don't want to enable interrupts, because then we'll end
1271 * up in an awkward situation in which IRQs are on but NMIs
1272 * are off.
1273 */
1274
1275 SWAPGS
1276 cld
1277 movq %rsp, %rdx
1278 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1279 pushq 5*8(%rdx) /* pt_regs->ss */
1280 pushq 4*8(%rdx) /* pt_regs->rsp */
1281 pushq 3*8(%rdx) /* pt_regs->flags */
1282 pushq 2*8(%rdx) /* pt_regs->cs */
1283 pushq 1*8(%rdx) /* pt_regs->rip */
1284 pushq $-1 /* pt_regs->orig_ax */
1285 pushq %rdi /* pt_regs->di */
1286 pushq %rsi /* pt_regs->si */
1287 pushq (%rdx) /* pt_regs->dx */
1288 pushq %rcx /* pt_regs->cx */
1289 pushq %rax /* pt_regs->ax */
1290 pushq %r8 /* pt_regs->r8 */
1291 pushq %r9 /* pt_regs->r9 */
1292 pushq %r10 /* pt_regs->r10 */
1293 pushq %r11 /* pt_regs->r11 */
1294 pushq %rbx /* pt_regs->rbx */
1295 pushq %rbp /* pt_regs->rbp */
1296 pushq %r12 /* pt_regs->r12 */
1297 pushq %r13 /* pt_regs->r13 */
1298 pushq %r14 /* pt_regs->r14 */
1299 pushq %r15 /* pt_regs->r15 */
1300
1301 /*
1302 * At this point we no longer need to worry about stack damage
1303 * due to nesting -- we're on the normal thread stack and we're
1304 * done with the NMI stack.
1305 */
1306
1307 movq %rsp, %rdi
1308 movq $-1, %rsi
1309 call do_nmi
1310
1311 /*
1312 * Return back to user mode. We must *not* do the normal exit
1313 * work, because we don't want to enable interrupts. Fortunately,
1314 * do_nmi doesn't modify pt_regs.
1315 */
1316 SWAPGS
1317 jmp restore_c_regs_and_iret
1318
1319.Lnmi_from_kernel:
1320 /*
1321 * Here's what our stack frame will look like:
1322 * +---------------------------------------------------------+
1323 * | original SS |
1324 * | original Return RSP |
1325 * | original RFLAGS |
1326 * | original CS |
1327 * | original RIP |
1328 * +---------------------------------------------------------+
1329 * | temp storage for rdx |
1330 * +---------------------------------------------------------+
1331 * | "NMI executing" variable |
1332 * +---------------------------------------------------------+
1333 * | iret SS } Copied from "outermost" frame |
1334 * | iret Return RSP } on each loop iteration; overwritten |
1335 * | iret RFLAGS } by a nested NMI to force another |
1336 * | iret CS } iteration if needed. |
1337 * | iret RIP } |
1338 * +---------------------------------------------------------+
1339 * | outermost SS } initialized in first_nmi; |
1340 * | outermost Return RSP } will not be changed before |
1341 * | outermost RFLAGS } NMI processing is done. |
1342 * | outermost CS } Copied to "iret" frame on each |
1343 * | outermost RIP } iteration. |
1344 * +---------------------------------------------------------+
1345 * | pt_regs |
1346 * +---------------------------------------------------------+
1347 *
1348 * The "original" frame is used by hardware. Before re-enabling
1349 * NMIs, we need to be done with it, and we need to leave enough
1350 * space for the asm code here.
1351 *
1352 * We return by executing IRET while RSP points to the "iret" frame.
1353 * That will either return for real or it will loop back into NMI
1354 * processing.
1355 *
1356 * The "outermost" frame is copied to the "iret" frame on each
1357 * iteration of the loop, so each iteration starts with the "iret"
1358 * frame pointing to the final return target.
1359 */
1360
1258 /* 1361 /*
1259 * If %cs was not the kernel segment, then the NMI triggered in user 1362 * Determine whether we're a nested NMI.
1260 * space, which means it is definitely not nested. 1363 *
1364 * If we interrupted kernel code between repeat_nmi and
1365 * end_repeat_nmi, then we are a nested NMI. We must not
1366 * modify the "iret" frame because it's being written by
1367 * the outer NMI. That's okay; the outer NMI handler is
 1368 * about to call do_nmi anyway, so we can just
1369 * resume the outer NMI.
1261 */ 1370 */
1262 cmpl $__KERNEL_CS, 16(%rsp) 1371
1263 jne first_nmi 1372 movq $repeat_nmi, %rdx
1373 cmpq 8(%rsp), %rdx
1374 ja 1f
1375 movq $end_repeat_nmi, %rdx
1376 cmpq 8(%rsp), %rdx
1377 ja nested_nmi_out
13781:
1264 1379
1265 /* 1380 /*
1266 * Check the special variable on the stack to see if NMIs are 1381 * Now check "NMI executing". If it's set, then we're nested.
1267 * executing. 1382 * This will not detect if we interrupted an outer NMI just
1383 * before IRET.
1268 */ 1384 */
1269 cmpl $1, -8(%rsp) 1385 cmpl $1, -8(%rsp)
1270 je nested_nmi 1386 je nested_nmi
1271 1387
1272 /* 1388 /*
1273 * Now test if the previous stack was an NMI stack. 1389 * Now test if the previous stack was an NMI stack. This covers
1274 * We need the double check. We check the NMI stack to satisfy the 1390 * the case where we interrupt an outer NMI after it clears
1275 * race when the first NMI clears the variable before returning. 1391 * "NMI executing" but before IRET. We need to be careful, though:
1276 * We check the variable because the first NMI could be in a 1392 * there is one case in which RSP could point to the NMI stack
1277 * breakpoint routine using a breakpoint stack. 1393 * despite there being no NMI active: naughty userspace controls
1394 * RSP at the very beginning of the SYSCALL targets. We can
1395 * pull a fast one on naughty userspace, though: we program
1396 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1397 * if it controls the kernel's RSP. We set DF before we clear
1398 * "NMI executing".
1278 */ 1399 */
1279 lea 6*8(%rsp), %rdx 1400 lea 6*8(%rsp), %rdx
1280 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ 1401 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1286,25 +1407,20 @@ ENTRY(nmi)
1286 cmpq %rdx, 4*8(%rsp) 1407 cmpq %rdx, 4*8(%rsp)
1287 /* If it is below the NMI stack, it is a normal NMI */ 1408 /* If it is below the NMI stack, it is a normal NMI */
1288 jb first_nmi 1409 jb first_nmi
1289 /* Ah, it is within the NMI stack, treat it as nested */ 1410
1411 /* Ah, it is within the NMI stack. */
1412
1413 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1414 jz first_nmi /* RSP was user controlled. */
1415
1416 /* This is a nested NMI. */
1290 1417
1291nested_nmi: 1418nested_nmi:
1292 /* 1419 /*
1293 * Do nothing if we interrupted the fixup in repeat_nmi. 1420 * Modify the "iret" frame to point to repeat_nmi, forcing another
1294 * It's about to repeat the NMI handler, so we are fine 1421 * iteration of NMI handling.
1295 * with ignoring this one.
1296 */ 1422 */
1297 movq $repeat_nmi, %rdx 1423 subq $8, %rsp
1298 cmpq 8(%rsp), %rdx
1299 ja 1f
1300 movq $end_repeat_nmi, %rdx
1301 cmpq 8(%rsp), %rdx
1302 ja nested_nmi_out
1303
13041:
1305 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
1306 leaq -1*8(%rsp), %rdx
1307 movq %rdx, %rsp
1308 leaq -10*8(%rsp), %rdx 1424 leaq -10*8(%rsp), %rdx
1309 pushq $__KERNEL_DS 1425 pushq $__KERNEL_DS
1310 pushq %rdx 1426 pushq %rdx
@@ -1318,61 +1434,42 @@ nested_nmi:
1318nested_nmi_out: 1434nested_nmi_out:
1319 popq %rdx 1435 popq %rdx
1320 1436
1321 /* No need to check faults here */ 1437 /* We are returning to kernel mode, so this cannot result in a fault. */
1322 INTERRUPT_RETURN 1438 INTERRUPT_RETURN
1323 1439
1324first_nmi: 1440first_nmi:
1325 /* 1441 /* Restore rdx. */
1326 * Because nested NMIs will use the pushed location that we
1327 * stored in rdx, we must keep that space available.
1328 * Here's what our stack frame will look like:
1329 * +-------------------------+
1330 * | original SS |
1331 * | original Return RSP |
1332 * | original RFLAGS |
1333 * | original CS |
1334 * | original RIP |
1335 * +-------------------------+
1336 * | temp storage for rdx |
1337 * +-------------------------+
1338 * | NMI executing variable |
1339 * +-------------------------+
1340 * | copied SS |
1341 * | copied Return RSP |
1342 * | copied RFLAGS |
1343 * | copied CS |
1344 * | copied RIP |
1345 * +-------------------------+
1346 * | Saved SS |
1347 * | Saved Return RSP |
1348 * | Saved RFLAGS |
1349 * | Saved CS |
1350 * | Saved RIP |
1351 * +-------------------------+
1352 * | pt_regs |
1353 * +-------------------------+
1354 *
1355 * The saved stack frame is used to fix up the copied stack frame
1356 * that a nested NMI may change to make the interrupted NMI iret jump
1357 * to the repeat_nmi. The original stack frame and the temp storage
1358 * is also used by nested NMIs and can not be trusted on exit.
1359 */
1360 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
1361 movq (%rsp), %rdx 1442 movq (%rsp), %rdx
1362 1443
1363 /* Set the NMI executing variable on the stack. */ 1444 /* Make room for "NMI executing". */
1364 pushq $1 1445 pushq $0
1365 1446
1366 /* Leave room for the "copied" frame */ 1447 /* Leave room for the "iret" frame */
1367 subq $(5*8), %rsp 1448 subq $(5*8), %rsp
1368 1449
1369 /* Copy the stack frame to the Saved frame */ 1450 /* Copy the "original" frame to the "outermost" frame */
1370 .rept 5 1451 .rept 5
1371 pushq 11*8(%rsp) 1452 pushq 11*8(%rsp)
1372 .endr 1453 .endr
1373 1454
1374 /* Everything up to here is safe from nested NMIs */ 1455 /* Everything up to here is safe from nested NMIs */
1375 1456
1457#ifdef CONFIG_DEBUG_ENTRY
1458 /*
1459 * For ease of testing, unmask NMIs right away. Disabled by
1460 * default because IRET is very expensive.
1461 */
1462 pushq $0 /* SS */
1463 pushq %rsp /* RSP (minus 8 because of the previous push) */
1464 addq $8, (%rsp) /* Fix up RSP */
1465 pushfq /* RFLAGS */
1466 pushq $__KERNEL_CS /* CS */
1467 pushq $1f /* RIP */
1468 INTERRUPT_RETURN /* continues at repeat_nmi below */
14691:
1470#endif
1471
1472repeat_nmi:
1376 /* 1473 /*
1377 * If there was a nested NMI, the first NMI's iret will return 1474 * If there was a nested NMI, the first NMI's iret will return
1378 * here. But NMIs are still enabled and we can take another 1475 * here. But NMIs are still enabled and we can take another
@@ -1381,16 +1478,20 @@ first_nmi:
1381 * it will just return, as we are about to repeat an NMI anyway. 1478 * it will just return, as we are about to repeat an NMI anyway.
1382 * This makes it safe to copy to the stack frame that a nested 1479 * This makes it safe to copy to the stack frame that a nested
1383 * NMI will update. 1480 * NMI will update.
1481 *
1482 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1483 * we're repeating an NMI, gsbase has the same value that it had on
1484 * the first iteration. paranoid_entry will load the kernel
1485 * gsbase if needed before we call do_nmi. "NMI executing"
1486 * is zero.
1384 */ 1487 */
1385repeat_nmi: 1488 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1489
1386 /* 1490 /*
1387 * Update the stack variable to say we are still in NMI (the update 1491 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1388 * is benign for the non-repeat case, where 1 was pushed just above 1492 * here must not modify the "iret" frame while we're writing to
1389 * to this very stack slot). 1493 * it or it will end up containing garbage.
1390 */ 1494 */
1391 movq $1, 10*8(%rsp)
1392
1393 /* Make another copy, this one may be modified by nested NMIs */
1394 addq $(10*8), %rsp 1495 addq $(10*8), %rsp
1395 .rept 5 1496 .rept 5
1396 pushq -6*8(%rsp) 1497 pushq -6*8(%rsp)
@@ -1399,9 +1500,9 @@ repeat_nmi:
1399end_repeat_nmi: 1500end_repeat_nmi:
1400 1501
1401 /* 1502 /*
1402 * Everything below this point can be preempted by a nested 1503 * Everything below this point can be preempted by a nested NMI.
1403 * NMI if the first NMI took an exception and reset our iret stack 1504 * If this happens, then the inner NMI will change the "iret"
1404 * so that we repeat another NMI. 1505 * frame to point back to repeat_nmi.
1405 */ 1506 */
1406 pushq $-1 /* ORIG_RAX: no syscall to restart */ 1507 pushq $-1 /* ORIG_RAX: no syscall to restart */
1407 ALLOC_PT_GPREGS_ON_STACK 1508 ALLOC_PT_GPREGS_ON_STACK
@@ -1415,28 +1516,11 @@ end_repeat_nmi:
1415 */ 1516 */
1416 call paranoid_entry 1517 call paranoid_entry
1417 1518
1418 /*
1419 * Save off the CR2 register. If we take a page fault in the NMI then
1420 * it could corrupt the CR2 value. If the NMI preempts a page fault
1421 * handler before it was able to read the CR2 register, and then the
1422 * NMI itself takes a page fault, the page fault that was preempted
1423 * will read the information from the NMI page fault and not the
1424 * origin fault. Save it off and restore it if it changes.
1425 * Use the r12 callee-saved register.
1426 */
1427 movq %cr2, %r12
1428
1429 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ 1519 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1430 movq %rsp, %rdi 1520 movq %rsp, %rdi
1431 movq $-1, %rsi 1521 movq $-1, %rsi
1432 call do_nmi 1522 call do_nmi
1433 1523
1434 /* Did the NMI take a page fault? Restore cr2 if it did */
1435 movq %cr2, %rcx
1436 cmpq %rcx, %r12
1437 je 1f
1438 movq %r12, %cr2
14391:
1440 testl %ebx, %ebx /* swapgs needed? */ 1524 testl %ebx, %ebx /* swapgs needed? */
1441 jnz nmi_restore 1525 jnz nmi_restore
1442nmi_swapgs: 1526nmi_swapgs:
@@ -1444,11 +1528,26 @@ nmi_swapgs:
1444nmi_restore: 1528nmi_restore:
1445 RESTORE_EXTRA_REGS 1529 RESTORE_EXTRA_REGS
1446 RESTORE_C_REGS 1530 RESTORE_C_REGS
1447 /* Pop the extra iret frame at once */ 1531
1532 /* Point RSP at the "iret" frame. */
1448 REMOVE_PT_GPREGS_FROM_STACK 6*8 1533 REMOVE_PT_GPREGS_FROM_STACK 6*8
1449 1534
1450 /* Clear the NMI executing stack variable */ 1535 /*
1451 movq $0, 5*8(%rsp) 1536 * Clear "NMI executing". Set DF first so that we can easily
1537 * distinguish the remaining code between here and IRET from
1538 * the SYSCALL entry and exit paths. On a native kernel, we
1539 * could just inspect RIP, but, on paravirt kernels,
1540 * INTERRUPT_RETURN can translate into a jump into a
1541 * hypercall page.
1542 */
1543 std
1544 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1545
1546 /*
1547 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1548 * stack in a single instruction. We are returning to kernel
1549 * mode, so this cannot result in a fault.
1550 */
1452 INTERRUPT_RETURN 1551 INTERRUPT_RETURN
1453END(nmi) 1552END(nmi)
1454 1553
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..a7e257d9cb90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
140 */ 140 */
141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
142 movl RIP(%rsp), %ecx /* User %eip */ 142 movl RIP(%rsp), %ecx /* User %eip */
143 movq RAX(%rsp), %rax
143 RESTORE_RSI_RDI 144 RESTORE_RSI_RDI
144 xorl %edx, %edx /* Do not leak kernel information */ 145 xorl %edx, %edx /* Do not leak kernel information */
145 xorq %r8, %r8 146 xorq %r8, %r8
@@ -205,7 +206,6 @@ sysexit_from_sys_call:
205 movl RDX(%rsp), %edx /* arg3 */ 206 movl RDX(%rsp), %edx /* arg3 */
206 movl RSI(%rsp), %ecx /* arg4 */ 207 movl RSI(%rsp), %ecx /* arg4 */
207 movl RDI(%rsp), %r8d /* arg5 */ 208 movl RDI(%rsp), %r8d /* arg5 */
208 movl %ebp, %r9d /* arg6 */
209 .endm 209 .endm
210 210
211 .macro auditsys_exit exit 211 .macro auditsys_exit exit
@@ -220,7 +220,6 @@ sysexit_from_sys_call:
2201: setbe %al /* 1 if error, 0 if not */ 2201: setbe %al /* 1 if error, 0 if not */
221 movzbl %al, %edi /* zero-extend that into %edi */ 221 movzbl %al, %edi /* zero-extend that into %edi */
222 call __audit_syscall_exit 222 call __audit_syscall_exit
223 movq RAX(%rsp), %rax /* reload syscall return value */
224 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi 223 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
225 DISABLE_INTERRUPTS(CLBR_NONE) 224 DISABLE_INTERRUPTS(CLBR_NONE)
226 TRACE_IRQS_OFF 225 TRACE_IRQS_OFF
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
236 235
237sysenter_auditsys: 236sysenter_auditsys:
238 auditsys_entry_common 237 auditsys_entry_common
238 movl %ebp, %r9d /* reload 6th syscall arg */
239 jmp sysenter_dispatch 239 jmp sysenter_dispatch
240 240
241sysexit_audit: 241sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
336 * 32-bit zero extended: 336 * 32-bit zero extended:
337 */ 337 */
338 ASM_STAC 338 ASM_STAC
3391: movl (%r8), %ebp 3391: movl (%r8), %r9d
340 _ASM_EXTABLE(1b, ia32_badarg) 340 _ASM_EXTABLE(1b, ia32_badarg)
341 ASM_CLAC 341 ASM_CLAC
342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
346cstar_do_call: 346cstar_do_call:
347 /* 32-bit syscall -> 64-bit C ABI argument conversion */ 347 /* 32-bit syscall -> 64-bit C ABI argument conversion */
348 movl %edi, %r8d /* arg5 */ 348 movl %edi, %r8d /* arg5 */
349 movl %ebp, %r9d /* arg6 */ 349 /* r9 already loaded */ /* arg6 */
350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ 350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
351 movl %ebx, %edi /* arg1 */ 351 movl %ebx, %edi /* arg1 */
352 movl %edx, %edx /* arg3 (zero extension) */ 352 movl %edx, %edx /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
358 call *ia32_sys_call_table(, %rax, 8) 358 call *ia32_sys_call_table(, %rax, 8)
359 movq %rax, RAX(%rsp) 359 movq %rax, RAX(%rsp)
3601: 3601:
361 movl RCX(%rsp), %ebp
362 DISABLE_INTERRUPTS(CLBR_NONE) 361 DISABLE_INTERRUPTS(CLBR_NONE)
363 TRACE_IRQS_OFF 362 TRACE_IRQS_OFF
364 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 363 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -369,6 +368,7 @@ sysretl_from_sys_call:
369 RESTORE_RSI_RDI_RDX 368 RESTORE_RSI_RDI_RDX
370 movl RIP(%rsp), %ecx 369 movl RIP(%rsp), %ecx
371 movl EFLAGS(%rsp), %r11d 370 movl EFLAGS(%rsp), %r11d
371 movq RAX(%rsp), %rax
372 xorq %r10, %r10 372 xorq %r10, %r10
373 xorq %r9, %r9 373 xorq %r9, %r9
374 xorq %r8, %r8 374 xorq %r8, %r8
@@ -392,7 +392,9 @@ sysretl_from_sys_call:
392 392
393#ifdef CONFIG_AUDITSYSCALL 393#ifdef CONFIG_AUDITSYSCALL
394cstar_auditsys: 394cstar_auditsys:
395 movl %r9d, R9(%rsp) /* register to be clobbered by call */
395 auditsys_entry_common 396 auditsys_entry_common
397 movl R9(%rsp), %r9d /* reload 6th syscall arg */
396 jmp cstar_dispatch 398 jmp cstar_dispatch
397 399
398sysretl_audit: 400sysretl_audit:
@@ -404,14 +406,16 @@ cstar_tracesys:
404 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 406 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
405 jz cstar_auditsys 407 jz cstar_auditsys
406#endif 408#endif
409 xchgl %r9d, %ebp
407 SAVE_EXTRA_REGS 410 SAVE_EXTRA_REGS
408 xorl %eax, %eax /* Do not leak kernel information */ 411 xorl %eax, %eax /* Do not leak kernel information */
409 movq %rax, R11(%rsp) 412 movq %rax, R11(%rsp)
410 movq %rax, R10(%rsp) 413 movq %rax, R10(%rsp)
411 movq %rax, R9(%rsp) 414 movq %r9, R9(%rsp)
412 movq %rax, R8(%rsp) 415 movq %rax, R8(%rsp)
413 movq %rsp, %rdi /* &pt_regs -> arg1 */ 416 movq %rsp, %rdi /* &pt_regs -> arg1 */
414 call syscall_trace_enter 417 call syscall_trace_enter
418 movl R9(%rsp), %r9d
415 419
416 /* Reload arg registers from stack. (see sysenter_tracesys) */ 420 /* Reload arg registers from stack. (see sysenter_tracesys) */
417 movl RCX(%rsp), %ecx 421 movl RCX(%rsp), %ecx
@@ -421,6 +425,7 @@ cstar_tracesys:
421 movl %eax, %eax /* zero extension */ 425 movl %eax, %eax /* zero extension */
422 426
423 RESTORE_EXTRA_REGS 427 RESTORE_EXTRA_REGS
428 xchgl %ebp, %r9d
424 jmp cstar_do_call 429 jmp cstar_do_call
425END(entry_SYSCALL_compat) 430END(entry_SYSCALL_compat)
426 431
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 4dd1f2d770af..aeac434c9feb 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -9,3 +9,4 @@ generic-y += cputime.h
9generic-y += dma-contiguous.h 9generic-y += dma-contiguous.h
10generic-y += early_ioremap.h 10generic-y += early_ioremap.h
11generic-y += mcs_spinlock.h 11generic-y += mcs_spinlock.h
12generic-y += mm-arch-hooks.h
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
280 set_ldt(NULL, 0); 280 set_ldt(NULL, 0);
281} 281}
282 282
283/*
284 * load one particular LDT into the current CPU
285 */
286static inline void load_LDT_nolock(mm_context_t *pc)
287{
288 set_ldt(pc->ldt, pc->size);
289}
290
291static inline void load_LDT(mm_context_t *pc)
292{
293 preempt_disable();
294 load_LDT_nolock(pc);
295 preempt_enable();
296}
297
298static inline unsigned long get_desc_base(const struct desc_struct *desc) 283static inline unsigned long get_desc_base(const struct desc_struct *desc)
299{ 284{
300 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); 285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
index 99efebb2f69d..ca3ce9ab9385 100644
--- a/arch/x86/include/asm/espfix.h
+++ b/arch/x86/include/asm/espfix.h
@@ -9,7 +9,7 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
9DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); 9DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
10 10
11extern void init_espfix_bsp(void); 11extern void init_espfix_bsp(void);
12extern void init_espfix_ap(void); 12extern void init_espfix_ap(int cpu);
13 13
14#endif /* CONFIG_X86_64 */ 14#endif /* CONFIG_X86_64 */
15 15
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 0637826292de..c49c5173158e 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -189,6 +189,7 @@ union fpregs_state {
189 struct fxregs_state fxsave; 189 struct fxregs_state fxsave;
190 struct swregs_state soft; 190 struct swregs_state soft;
191 struct xregs_state xsave; 191 struct xregs_state xsave;
192 u8 __padding[PAGE_SIZE];
192}; 193};
193 194
194/* 195/*
@@ -198,40 +199,6 @@ union fpregs_state {
198 */ 199 */
199struct fpu { 200struct fpu {
200 /* 201 /*
201 * @state:
202 *
203 * In-memory copy of all FPU registers that we save/restore
204 * over context switches. If the task is using the FPU then
205 * the registers in the FPU are more recent than this state
206 * copy. If the task context-switches away then they get
207 * saved here and represent the FPU state.
208 *
209 * After context switches there may be a (short) time period
210 * during which the in-FPU hardware registers are unchanged
211 * and still perfectly match this state, if the tasks
212 * scheduled afterwards are not using the FPU.
213 *
214 * This is the 'lazy restore' window of optimization, which
215 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
216 *
217 * We detect whether a subsequent task uses the FPU via setting
218 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
219 *
220 * During this window, if the task gets scheduled again, we
221 * might be able to skip having to do a restore from this
222 * memory buffer to the hardware registers - at the cost of
223 * incurring the overhead of #NM fault traps.
224 *
225 * Note that on modern CPUs that support the XSAVEOPT (or other
226 * optimized XSAVE instructions), we don't use #NM traps anymore,
227 * as the hardware can track whether FPU registers need saving
228 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
229 * logic, which unconditionally saves/restores all FPU state
230 * across context switches. (if FPU state exists.)
231 */
232 union fpregs_state state;
233
234 /*
235 * @last_cpu: 202 * @last_cpu:
236 * 203 *
237 * Records the last CPU on which this context was loaded into 204 * Records the last CPU on which this context was loaded into
@@ -288,6 +255,43 @@ struct fpu {
288 * deal with bursty apps that only use the FPU for a short time: 255 * deal with bursty apps that only use the FPU for a short time:
289 */ 256 */
290 unsigned char counter; 257 unsigned char counter;
258 /*
259 * @state:
260 *
261 * In-memory copy of all FPU registers that we save/restore
262 * over context switches. If the task is using the FPU then
263 * the registers in the FPU are more recent than this state
264 * copy. If the task context-switches away then they get
265 * saved here and represent the FPU state.
266 *
267 * After context switches there may be a (short) time period
268 * during which the in-FPU hardware registers are unchanged
269 * and still perfectly match this state, if the tasks
270 * scheduled afterwards are not using the FPU.
271 *
272 * This is the 'lazy restore' window of optimization, which
273 * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
274 *
275 * We detect whether a subsequent task uses the FPU via setting
276 * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
277 *
278 * During this window, if the task gets scheduled again, we
279 * might be able to skip having to do a restore from this
280 * memory buffer to the hardware registers - at the cost of
281 * incurring the overhead of #NM fault traps.
282 *
283 * Note that on modern CPUs that support the XSAVEOPT (or other
284 * optimized XSAVE instructions), we don't use #NM traps anymore,
285 * as the hardware can track whether FPU registers need saving
286 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
287 * logic, which unconditionally saves/restores all FPU state
288 * across context switches. (if FPU state exists.)
289 */
290 union fpregs_state state;
291 /*
292 * WARNING: 'state' is dynamically-sized. Do not put
293 * anything after it here.
294 */
291}; 295};
292 296
293#endif /* _ASM_X86_FPU_H */ 297#endif /* _ASM_X86_FPU_H */
diff --git a/arch/x86/include/asm/intel_pmc_ipc.h b/arch/x86/include/asm/intel_pmc_ipc.h
index 200ec2e7821d..cd0310e186f4 100644
--- a/arch/x86/include/asm/intel_pmc_ipc.h
+++ b/arch/x86/include/asm/intel_pmc_ipc.h
@@ -25,36 +25,9 @@
25 25
26#if IS_ENABLED(CONFIG_INTEL_PMC_IPC) 26#if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
27 27
28/*
29 * intel_pmc_ipc_simple_command
30 * @cmd: command
31 * @sub: sub type
32 */
33int intel_pmc_ipc_simple_command(int cmd, int sub); 28int intel_pmc_ipc_simple_command(int cmd, int sub);
34
35/*
36 * intel_pmc_ipc_raw_cmd
37 * @cmd: command
38 * @sub: sub type
39 * @in: input data
40 * @inlen: input length in bytes
41 * @out: output data
42 * @outlen: output length in dwords
43 * @sptr: data writing to SPTR register
44 * @dptr: data writing to DPTR register
45 */
46int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, 29int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
47 u32 *out, u32 outlen, u32 dptr, u32 sptr); 30 u32 *out, u32 outlen, u32 dptr, u32 sptr);
48
49/*
50 * intel_pmc_ipc_command
51 * @cmd: command
52 * @sub: sub type
53 * @in: input data
54 * @inlen: input length in bytes
55 * @out: output data
56 * @outlen: output length in dwords
57 */
58int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, 31int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
59 u32 *out, u32 outlen); 32 u32 *out, u32 outlen);
60 33
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422fbad8..74a2a8dc9908 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
14 14
15#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
16 16
17extern pte_t kasan_zero_pte[];
18extern pte_t kasan_zero_pmd[];
19extern pte_t kasan_zero_pud[];
20
21#ifdef CONFIG_KASAN 17#ifdef CONFIG_KASAN
22void __init kasan_map_early_shadow(pgd_t *pgd); 18void __init kasan_early_init(void);
23void __init kasan_init(void); 19void __init kasan_init(void);
24#else 20#else
25static inline void kasan_map_early_shadow(pgd_t *pgd) { } 21static inline void kasan_early_init(void) { }
26static inline void kasan_init(void) { } 22static inline void kasan_init(void) { }
27#endif 23#endif
28 24
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2a7f5d782c33..49ec9038ec14 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -604,6 +604,8 @@ struct kvm_arch {
604 bool iommu_noncoherent; 604 bool iommu_noncoherent;
605#define __KVM_HAVE_ARCH_NONCOHERENT_DMA 605#define __KVM_HAVE_ARCH_NONCOHERENT_DMA
606 atomic_t noncoherent_dma_count; 606 atomic_t noncoherent_dma_count;
607#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
608 atomic_t assigned_device_count;
607 struct kvm_pic *vpic; 609 struct kvm_pic *vpic;
608 struct kvm_ioapic *vioapic; 610 struct kvm_ioapic *vioapic;
609 struct kvm_pit *vpit; 611 struct kvm_pit *vpit;
diff --git a/arch/x86/include/asm/mm-arch-hooks.h b/arch/x86/include/asm/mm-arch-hooks.h
deleted file mode 100644
index 4e881a342236..000000000000
--- a/arch/x86/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_X86_MM_ARCH_HOOKS_H
13#define _ASM_X86_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_X86_MM_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
9 * we put the segment information here. 9 * we put the segment information here.
10 */ 10 */
11typedef struct { 11typedef struct {
12 void *ldt; 12 struct ldt_struct *ldt;
13 int size;
14 13
15#ifdef CONFIG_X86_64 14#ifdef CONFIG_X86_64
16 /* True if mm supports a task running in 32 bit compatibility mode. */ 15 /* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 5e8daee7c5c9..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
23 23
24static inline void load_mm_cr4(struct mm_struct *mm) 24static inline void load_mm_cr4(struct mm_struct *mm)
25{ 25{
26 if (static_key_true(&rdpmc_always_available) || 26 if (static_key_false(&rdpmc_always_available) ||
27 atomic_read(&mm->context.perf_rdpmc_allowed)) 27 atomic_read(&mm->context.perf_rdpmc_allowed))
28 cr4_set_bits(X86_CR4_PCE); 28 cr4_set_bits(X86_CR4_PCE);
29 else 29 else
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
34#endif 34#endif
35 35
36/* 36/*
37 * ldt_structs can be allocated, used, and freed, but they are never
38 * modified while live.
39 */
40struct ldt_struct {
41 /*
42 * Xen requires page-aligned LDTs with special permissions. This is
43 * needed to prevent us from installing evil descriptors such as
44 * call gates. On native, we could merge the ldt_struct and LDT
45 * allocations, but it's not worth trying to optimize.
46 */
47 struct desc_struct *entries;
48 int size;
49};
50
51static inline void load_mm_ldt(struct mm_struct *mm)
52{
53 struct ldt_struct *ldt;
54
55 /* lockless_dereference synchronizes with smp_store_release */
56 ldt = lockless_dereference(mm->context.ldt);
57
58 /*
59 * Any change to mm->context.ldt is followed by an IPI to all
60 * CPUs with the mm active. The LDT will not be freed until
61 * after the IPI is handled by all such CPUs. This means that,
62 * if the ldt_struct changes before we return, the values we see
63 * will be safe, and the new values will be loaded before we run
64 * any user code.
65 *
66 * NB: don't try to convert this to use RCU without extreme care.
67 * We would still need IRQs off, because we don't want to change
68 * the local LDT after an IPI loaded a newer value than the one
69 * that we can see.
70 */
71
72 if (unlikely(ldt))
73 set_ldt(ldt->entries, ldt->size);
74 else
75 clear_LDT();
76
77 DEBUG_LOCKS_WARN_ON(preemptible());
78}
79
80/*
37 * Used for LDT copy/destruction. 81 * Used for LDT copy/destruction.
38 */ 82 */
39int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 83int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
78 * was called and then modify_ldt changed 122 * was called and then modify_ldt changed
79 * prev->context.ldt but suppressed an IPI to this CPU. 123 * prev->context.ldt but suppressed an IPI to this CPU.
80 * In this case, prev->context.ldt != NULL, because we 124 * In this case, prev->context.ldt != NULL, because we
81	 * never free an LDT while the mm still exists. That
82	 * means that next->context.ldt != prev->context.ldt,
83	 * because mms never share an LDT.
125	 * never set context.ldt to NULL while the mm still
126	 * exists. That means that next->context.ldt !=
127	 * prev->context.ldt, because mms never share an LDT.
84 */ 128 */
85 if (unlikely(prev->context.ldt != next->context.ldt)) 129 if (unlikely(prev->context.ldt != next->context.ldt))
86 load_LDT_nolock(&next->context); 130 load_mm_ldt(next);
87 } 131 }
88#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
89 else { 133 else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 load_cr3(next->pgd); 150 load_cr3(next->pgd);
107 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 151 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
108 load_mm_cr4(next); 152 load_mm_cr4(next);
109 load_LDT_nolock(&next->context); 153 load_mm_ldt(next);
110 } 154 }
111 } 155 }
112#endif 156#endif
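
The load_mm_ldt()/install_ldt() pair introduced above relies on a publish/consume idiom: the writer fully initializes the new ldt_struct and then publishes the pointer with smp_store_release(), while readers pick it up with lockless_dereference() and therefore never observe a half-built object. A minimal sketch of that idiom follows; 'struct cfg', 'publish_cfg' and 'read_cfg' are illustrative names, not kernel APIs.

/*
 * Sketch of the publish/consume idiom used for mm->context.ldt above.
 */
struct cfg {
	int a, b;
};

static struct cfg *current_cfg;

static void publish_cfg(struct cfg *new)
{
	/* All stores initializing *new are visible before the pointer is. */
	smp_store_release(&current_cfg, new);
}

static int read_cfg(void)
{
	/* Pairs with smp_store_release(); never sees a half-built object. */
	struct cfg *c = lockless_dereference(current_cfg);

	return c ? c->a + c->b : 0;
}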
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 43e6519df0d5..944f1785ed0d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -390,9 +390,6 @@ struct thread_struct {
390#endif 390#endif
391 unsigned long gs; 391 unsigned long gs;
392 392
393 /* Floating point and extended processor state */
394 struct fpu fpu;
395
396 /* Save middle states of ptrace breakpoints */ 393 /* Save middle states of ptrace breakpoints */
397 struct perf_event *ptrace_bps[HBP_NUM]; 394 struct perf_event *ptrace_bps[HBP_NUM];
398 /* Debug status used for traps, single steps, etc... */ 395 /* Debug status used for traps, single steps, etc... */
@@ -418,6 +415,13 @@ struct thread_struct {
418 unsigned long iopl; 415 unsigned long iopl;
419 /* Max allowed port in the bitmap, in bytes: */ 416 /* Max allowed port in the bitmap, in bytes: */
420 unsigned io_bitmap_max; 417 unsigned io_bitmap_max;
418
419 /* Floating point and extended processor state */
420 struct fpu fpu;
421 /*
422 * WARNING: 'fpu' is dynamically-sized. It *MUST* be at
423 * the end.
424 */
421}; 425};
422 426
423/* 427/*
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
57 unsigned long ip; 57 unsigned long ip;
58 unsigned long flags; 58 unsigned long flags;
59 unsigned short cs; 59 unsigned short cs;
60 unsigned short __pad2; /* Was called gs, but was always zero. */ 60 unsigned short gs;
61 unsigned short __pad1; /* Was called fs, but was always zero. */ 61 unsigned short fs;
62 unsigned short ss; 62 unsigned short __pad0;
63 unsigned long err; 63 unsigned long err;
64 unsigned long trapno; 64 unsigned long trapno;
65 unsigned long oldmask; 65 unsigned long oldmask;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
79#else /* CONFIG_X86_32 */ 79#else /* CONFIG_X86_32 */
80 80
81/* frame pointer must be last for get_wchan */ 81/* frame pointer must be last for get_wchan */
82#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t" 82#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t" 83#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
84 84
85#define __EXTRA_CLOBBER \ 85#define __EXTRA_CLOBBER \
86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \ 86 , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
87 "r12", "r13", "r14", "r15", "flags" 87 "r12", "r13", "r14", "r15"
88 88
89#ifdef CONFIG_CC_STACKPROTECTOR 89#ifdef CONFIG_CC_STACKPROTECTOR
90#define __switch_canary \ 90#define __switch_canary \
@@ -100,11 +100,7 @@ do { \
100#define __switch_canary_iparam 100#define __switch_canary_iparam
101#endif /* CC_STACKPROTECTOR */ 101#endif /* CC_STACKPROTECTOR */
102 102
103/*
104 * There is no need to save or restore flags, because flags are always
105 * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
106 * has no effect.
107 */
103/* Save restore flags to clear handle leaking NT */
108#define switch_to(prev, next, last) \ 104#define switch_to(prev, next, last) \
109 asm volatile(SAVE_CONTEXT \ 105 asm volatile(SAVE_CONTEXT \
110 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \ 106 "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 8fba544e9cc4..f36d56bd7632 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -108,6 +108,8 @@
108#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4) 108#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
109/* Support for a virtual guest idle state is available */ 109/* Support for a virtual guest idle state is available */
110#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5) 110#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
111/* Guest crash data handler available */
112#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
111 113
112/* 114/*
113 * Implementation recommendations. Indicates which behaviors the hypervisor 115 * Implementation recommendations. Indicates which behaviors the hypervisor
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
354struct kvm_sync_regs { 354struct kvm_sync_regs {
355}; 355};
356 356
357#define KVM_QUIRK_LINT0_REENABLED (1 << 0) 357#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
358#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) 358#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
359 359
360#endif /* _ASM_X86_KVM_H */ 360#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
177 __u64 rip; 177 __u64 rip;
178 __u64 eflags; /* RFLAGS */ 178 __u64 eflags; /* RFLAGS */
179 __u16 cs; 179 __u16 cs;
180 180 __u16 gs;
181 /* 181 __u16 fs;
182 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), 182 __u16 __pad0;
183 * Linux saved and restored fs and gs in these slots. This
184 * was counterproductive, as fsbase and gsbase were never
185 * saved, so arch_prctl was presumably unreliable.
186 *
187 * If these slots are ever needed for any other purpose, there
188 * is some risk that very old 64-bit binaries could get
189 * confused. I doubt that many such binaries still work,
190 * though, since the same patch in 2.5.64 also removed the
191 * 64-bit set_thread_area syscall, so it appears that there is
192 * no TLS API that works in both pre- and post-2.5.64 kernels.
193 */
194 __u16 __pad2; /* Was gs. */
195 __u16 __pad1; /* Was fs. */
196
197 __u16 ss;
198 __u64 err; 183 __u64 err;
199 __u64 trapno; 184 __u64 trapno;
200 __u64 oldmask; 185 __u64 oldmask;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
943 */ 943 */
944 if (irq < nr_legacy_irqs() && data->count == 1) { 944 if (irq < nr_legacy_irqs() && data->count == 1) {
945 if (info->ioapic_trigger != data->trigger) 945 if (info->ioapic_trigger != data->trigger)
946 mp_register_handler(irq, data->trigger); 946 mp_register_handler(irq, info->ioapic_trigger);
947 data->entry.trigger = data->trigger = info->ioapic_trigger; 947 data->entry.trigger = data->trigger = info->ioapic_trigger;
948 data->entry.polarity = data->polarity = info->ioapic_polarity; 948 data->entry.polarity = data->polarity = info->ioapic_polarity;
949 } 949 }
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 28eba2d38b15..2683f36e4e0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
322 irq_data->chip = &lapic_controller; 322 irq_data->chip = &lapic_controller;
323 irq_data->chip_data = data; 323 irq_data->chip_data = data;
324 irq_data->hwirq = virq + i; 324 irq_data->hwirq = virq + i;
325 err = assign_irq_vector_policy(virq, irq_data->node, data, 325 err = assign_irq_vector_policy(virq + i, irq_data->node, data,
326 info); 326 info);
327 if (err) 327 if (err)
328 goto error; 328 goto error;
@@ -409,12 +409,6 @@ static void __setup_vector_irq(int cpu)
409 int irq, vector; 409 int irq, vector;
410 struct apic_chip_data *data; 410 struct apic_chip_data *data;
411 411
412 /*
413 * vector_lock will make sure that we don't run into irq vector
414 * assignments that might be happening on another cpu in parallel,
415 * while we setup our initial vector to irq mappings.
416 */
417 raw_spin_lock(&vector_lock);
418 /* Mark the inuse vectors */ 412 /* Mark the inuse vectors */
419 for_each_active_irq(irq) { 413 for_each_active_irq(irq) {
420 data = apic_chip_data(irq_get_irq_data(irq)); 414 data = apic_chip_data(irq_get_irq_data(irq));
@@ -436,16 +430,16 @@ static void __setup_vector_irq(int cpu)
436 if (!cpumask_test_cpu(cpu, data->domain)) 430 if (!cpumask_test_cpu(cpu, data->domain))
437 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; 431 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
438 } 432 }
439 raw_spin_unlock(&vector_lock);
440} 433}
441 434
442/* 435/*
443 * Setup the vector to irq mappings. 436 * Setup the vector to irq mappings. Must be called with vector_lock held.
444 */ 437 */
445void setup_vector_irq(int cpu) 438void setup_vector_irq(int cpu)
446{ 439{
447 int irq; 440 int irq;
448 441
442 lockdep_assert_held(&vector_lock);
449 /* 443 /*
450 * On most of the platforms, legacy PIC delivers the interrupts on the 444 * On most of the platforms, legacy PIC delivers the interrupts on the
451 * boot cpu. But there are certain platforms where PIC interrupts are 445 * boot cpu. But there are certain platforms where PIC interrupts are
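
The setup_vector_irq() change above moves the locking out to the callers and makes the new precondition checkable with lockdep_assert_held(). A small sketch of that pattern, using made-up names ('table_lock', 'update_percpu_table', 'bringup_cpu'):

/*
 * Sketch of a "caller must hold the lock" contract enforced by lockdep,
 * as done for setup_vector_irq() above.
 */
static DEFINE_RAW_SPINLOCK(table_lock);

static void update_percpu_table(int cpu)
{
	lockdep_assert_held(&table_lock);	/* complain if the caller forgot */
	/* ... modify state that table_lock protects ... */
}

static void bringup_cpu(int cpu)
{
	raw_spin_lock(&table_lock);
	update_percpu_table(cpu);
	raw_spin_unlock(&table_lock);
}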
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
1410 load_sp0(t, &current->thread); 1410 load_sp0(t, &current->thread);
1411 set_tss_desc(cpu, t); 1411 set_tss_desc(cpu, t);
1412 load_TR_desc(); 1412 load_TR_desc();
1413 load_LDT(&init_mm.context); 1413 load_mm_ldt(&init_mm);
1414 1414
1415 clear_all_debug_regs(); 1415 clear_all_debug_regs();
1416 dbg_restore_debug_regs(); 1416 dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
1459 load_sp0(t, thread); 1459 load_sp0(t, thread);
1460 set_tss_desc(cpu, t); 1460 set_tss_desc(cpu, t);
1461 load_TR_desc(); 1461 load_TR_desc();
1462 load_LDT(&init_mm.context); 1462 load_mm_ldt(&init_mm);
1463 1463
1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1465 1465
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
2179 int idx = segment >> 3; 2179 int idx = segment >> 3;
2180 2180
2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { 2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2182 struct ldt_struct *ldt;
2183
2182 if (idx > LDT_ENTRIES) 2184 if (idx > LDT_ENTRIES)
2183 return 0; 2185 return 0;
2184 2186
2185 if (idx > current->active_mm->context.size) 2187 /* IRQs are off, so this synchronizes with smp_store_release */
2188 ldt = lockless_dereference(current->active_mm->context.ldt);
2189 if (!ldt || idx > ldt->size)
2186 return 0; 2190 return 0;
2187 2191
2188 desc = current->active_mm->context.ldt; 2192 desc = &ldt->entries[idx];
2189 } else { 2193 } else {
2190 if (idx > GDT_ENTRIES) 2194 if (idx > GDT_ENTRIES)
2191 return 0; 2195 return 0;
2192 2196
2193 desc = raw_cpu_ptr(gdt_page.gdt); 2197 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2194 } 2198 }
2195 2199
2196 return get_desc_base(desc + idx); 2200 return get_desc_base(desc);
2197} 2201}
2198 2202
2199#ifdef CONFIG_COMPAT 2203#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a981fb2..6326ae24e4d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
2534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 2534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
2535 cpuc->shared_regs = allocate_shared_regs(cpu); 2535 cpuc->shared_regs = allocate_shared_regs(cpu);
2536 if (!cpuc->shared_regs) 2536 if (!cpuc->shared_regs)
2537 return NOTIFY_BAD; 2537 goto err;
2538 } 2538 }
2539 2539
2540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 2540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
2542 2542
2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); 2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
2544 if (!cpuc->constraint_list) 2544 if (!cpuc->constraint_list)
2545 return NOTIFY_BAD; 2545 goto err_shared_regs;
2546 2546
2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
2548 if (!cpuc->excl_cntrs) { 2548 if (!cpuc->excl_cntrs)
2549 kfree(cpuc->constraint_list); 2549 goto err_constraint_list;
2550 kfree(cpuc->shared_regs); 2550
2551 return NOTIFY_BAD;
2552 }
2553 cpuc->excl_thread_id = 0; 2551 cpuc->excl_thread_id = 0;
2554 } 2552 }
2555 2553
2556 return NOTIFY_OK; 2554 return NOTIFY_OK;
2555
2556err_constraint_list:
2557 kfree(cpuc->constraint_list);
2558 cpuc->constraint_list = NULL;
2559
2560err_shared_regs:
2561 kfree(cpuc->shared_regs);
2562 cpuc->shared_regs = NULL;
2563
2564err:
2565 return NOTIFY_BAD;
2557} 2566}
2558 2567
2559static void intel_pmu_cpu_starting(int cpu) 2568static void intel_pmu_cpu_starting(int cpu)
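
The intel_pmu_cpu_prepare() hunk above converts ad-hoc error handling into the usual goto-unwind style: each failure jumps to a label that releases everything allocated so far, in reverse order. A compact sketch of the pattern; 'struct res' and 'prepare' are illustrative names.

/*
 * Sketch of the goto-unwind error handling used in intel_pmu_cpu_prepare().
 */
struct res {
	void *a;
	void *b;
};

static int prepare(struct res *r)
{
	r->a = kzalloc(64, GFP_KERNEL);
	if (!r->a)
		goto err;

	r->b = kzalloc(64, GFP_KERNEL);
	if (!r->b)
		goto err_a;

	return 0;

err_a:
	kfree(r->a);
	r->a = NULL;
err:
	return -ENOMEM;
}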
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
952 return 0; 952 return 0;
953 953
954 /* 954 /*
955 * Getting up-to-date values requires an SMP IPI which is not
956 * possible if we're being called in interrupt context. Return
957 * the cached values instead.
958 */
959 if (unlikely(in_interrupt()))
960 goto out;
961
962 /*
955 * Notice that we don't perform the reading of an RMID 963 * Notice that we don't perform the reading of an RMID
956 * atomically, because we can't hold a spin lock across the 964 * atomically, because we can't hold a spin lock across the
957 * IPIs. 965 * IPIs.
@@ -1247,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
1247 cpumask_set_cpu(cpu, &cqm_cpumask); 1255 cpumask_set_cpu(cpu, &cqm_cpumask);
1248} 1256}
1249 1257
1250static void intel_cqm_cpu_prepare(unsigned int cpu) 1258static void intel_cqm_cpu_starting(unsigned int cpu)
1251{ 1259{
1252 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); 1260 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
1253 struct cpuinfo_x86 *c = &cpu_data(cpu); 1261 struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1288,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
1288 unsigned int cpu = (unsigned long)hcpu; 1296 unsigned int cpu = (unsigned long)hcpu;
1289 1297
1290 switch (action & ~CPU_TASKS_FROZEN) { 1298 switch (action & ~CPU_TASKS_FROZEN) {
1291 case CPU_UP_PREPARE:
1292 intel_cqm_cpu_prepare(cpu);
1293 break;
1294 case CPU_DOWN_PREPARE: 1299 case CPU_DOWN_PREPARE:
1295 intel_cqm_cpu_exit(cpu); 1300 intel_cqm_cpu_exit(cpu);
1296 break; 1301 break;
1297 case CPU_STARTING: 1302 case CPU_STARTING:
1303 intel_cqm_cpu_starting(cpu);
1298 cqm_pick_event_reader(cpu); 1304 cqm_pick_event_reader(cpu);
1299 break; 1305 break;
1300 } 1306 }
@@ -1365,7 +1371,7 @@ static int __init intel_cqm_init(void)
1365 goto out; 1371 goto out;
1366 1372
1367 for_each_online_cpu(i) { 1373 for_each_online_cpu(i) {
1368 intel_cqm_cpu_prepare(i); 1374 intel_cqm_cpu_starting(i);
1369 cqm_pick_event_reader(i); 1375 cqm_pick_event_reader(i);
1370 } 1376 }
1371 1377
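
The in_interrupt() check added to intel_cqm_event_count() above exists because refreshing the count requires an SMP IPI, which cannot be issued from interrupt context, so the cached value is returned instead. A sketch of that shape follows; 'struct cnt', 'refresh_on_cpu' and 'cnt_read' are illustrative, not real kernel APIs.

/*
 * Sketch of the "no IPIs from interrupt context" guard.
 */
struct cnt {
	int cpu;
	atomic64_t cached;
};

static void refresh_on_cpu(void *info)
{
	struct cnt *c = info;

	atomic64_set(&c->cached, 0 /* read the hardware counter here */);
}

static u64 cnt_read(struct cnt *c)
{
	/* An IPI is not allowed here, so fall back to the cached value. */
	if (in_interrupt())
		return atomic64_read(&c->cached);

	smp_call_function_single(c->cpu, refresh_on_cpu, c, 1);
	return atomic64_read(&c->cached);
}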
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 89427d8d4fc5..eec40f595ab9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -175,7 +175,9 @@ static __init void early_serial_init(char *s)
175 } 175 }
176 176
177 if (*s) { 177 if (*s) {
178 if (kstrtoul(s, 0, &baud) < 0 || baud == 0) 178 baud = simple_strtoull(s, &e, 0);
179
180 if (baud == 0 || s == e)
179 baud = DEFAULT_BAUD; 181 baud = DEFAULT_BAUD;
180 } 182 }
181 183
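
The early_serial_init() hunk above switches from kstrtoul(), which rejects any trailing characters, back to simple_strtoull() plus an explicit end-pointer check, so option strings such as "115200n8" still yield a usable baud rate. A sketch of the same parse, assuming an illustrative helper name 'parse_baud':

/*
 * Sketch of the parse performed above: take the leading number, ignore
 * trailing characters, and fall back to a default when nothing usable
 * was found.
 */
static unsigned long parse_baud(char *s, unsigned long dflt)
{
	char *e;
	unsigned long baud = simple_strtoull(s, &e, 0);

	return (baud == 0 || e == s) ? dflt : baud;
}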
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index f5d0730e7b08..ce95676abd60 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -131,25 +131,24 @@ void __init init_espfix_bsp(void)
131 init_espfix_random(); 131 init_espfix_random();
132 132
133 /* The rest is the same as for any other processor */ 133 /* The rest is the same as for any other processor */
134 init_espfix_ap(); 134 init_espfix_ap(0);
135} 135}
136 136
137void init_espfix_ap(void) 137void init_espfix_ap(int cpu)
138{ 138{
139 unsigned int cpu, page; 139 unsigned int page;
140 unsigned long addr; 140 unsigned long addr;
141 pud_t pud, *pud_p; 141 pud_t pud, *pud_p;
142 pmd_t pmd, *pmd_p; 142 pmd_t pmd, *pmd_p;
143 pte_t pte, *pte_p; 143 pte_t pte, *pte_p;
144 int n; 144 int n, node;
145 void *stack_page; 145 void *stack_page;
146 pteval_t ptemask; 146 pteval_t ptemask;
147 147
148 /* We only have to do this once... */ 148 /* We only have to do this once... */
149 if (likely(this_cpu_read(espfix_stack))) 149 if (likely(per_cpu(espfix_stack, cpu)))
150 return; /* Already initialized */ 150 return; /* Already initialized */
151 151
152 cpu = smp_processor_id();
153 addr = espfix_base_addr(cpu); 152 addr = espfix_base_addr(cpu);
154 page = cpu/ESPFIX_STACKS_PER_PAGE; 153 page = cpu/ESPFIX_STACKS_PER_PAGE;
155 154
@@ -165,12 +164,15 @@ void init_espfix_ap(void)
165 if (stack_page) 164 if (stack_page)
166 goto unlock_done; 165 goto unlock_done;
167 166
167 node = cpu_to_node(cpu);
168 ptemask = __supported_pte_mask; 168 ptemask = __supported_pte_mask;
169 169
170 pud_p = &espfix_pud_page[pud_index(addr)]; 170 pud_p = &espfix_pud_page[pud_index(addr)];
171 pud = *pud_p; 171 pud = *pud_p;
172 if (!pud_present(pud)) { 172 if (!pud_present(pud)) {
173 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); 173 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
174
175 pmd_p = (pmd_t *)page_address(page);
174 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); 176 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
175 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); 177 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
176 for (n = 0; n < ESPFIX_PUD_CLONES; n++) 178 for (n = 0; n < ESPFIX_PUD_CLONES; n++)
@@ -180,7 +182,9 @@ void init_espfix_ap(void)
180 pmd_p = pmd_offset(&pud, addr); 182 pmd_p = pmd_offset(&pud, addr);
181 pmd = *pmd_p; 183 pmd = *pmd_p;
182 if (!pmd_present(pmd)) { 184 if (!pmd_present(pmd)) {
183 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); 185 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
186
187 pte_p = (pte_t *)page_address(page);
184 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); 188 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
185 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT); 189 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
186 for (n = 0; n < ESPFIX_PMD_CLONES; n++) 190 for (n = 0; n < ESPFIX_PMD_CLONES; n++)
@@ -188,7 +192,7 @@ void init_espfix_ap(void)
188 } 192 }
189 193
190 pte_p = pte_offset_kernel(&pmd, addr); 194 pte_p = pte_offset_kernel(&pmd, addr);
191 stack_page = (void *)__get_free_page(GFP_KERNEL); 195 stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
192 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); 196 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
193 for (n = 0; n < ESPFIX_PTE_CLONES; n++) 197 for (n = 0; n < ESPFIX_PTE_CLONES; n++)
194 set_pte(&pte_p[n*PTE_STRIDE], pte); 198 set_pte(&pte_p[n*PTE_STRIDE], pte);
@@ -199,7 +203,7 @@ void init_espfix_ap(void)
199unlock_done: 203unlock_done:
200 mutex_unlock(&espfix_init_mutex); 204 mutex_unlock(&espfix_init_mutex);
201done: 205done:
202 this_cpu_write(espfix_stack, addr); 206 per_cpu(espfix_stack, cpu) = addr;
203 this_cpu_write(espfix_waddr, (unsigned long)stack_page 207 per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
204 + (addr & ~PAGE_MASK)); 208 + (addr & ~PAGE_MASK);
205} 209}
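
The espfix changes above let the boot CPU set up another CPU's espfix stack before that CPU runs, so the smp_processor_id()/this_cpu accessors are replaced by an explicit cpu argument and the page-table pages are allocated on that CPU's NUMA node. A small sketch of the node-local allocation; 'alloc_page_for' is an illustrative helper, not an existing kernel API.

/*
 * Sketch of the node-local allocation pattern used above.
 */
static void *alloc_page_for(int cpu, gfp_t gfp)
{
	struct page *page = alloc_pages_node(cpu_to_node(cpu), gfp, 0);

	return page ? page_address(page) : NULL;
}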
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954626fd..d25097c3fc1d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
270 dst_fpu->fpregs_active = 0; 270 dst_fpu->fpregs_active = 0;
271 dst_fpu->last_cpu = -1; 271 dst_fpu->last_cpu = -1;
272 272
273 if (src_fpu->fpstate_active) 273 if (src_fpu->fpstate_active && cpu_has_fpu)
274 fpu_copy(dst_fpu, src_fpu); 274 fpu_copy(dst_fpu, src_fpu);
275 275
276 return 0; 276 return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 32826791e675..d14e9ac3235a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -4,6 +4,8 @@
4#include <asm/fpu/internal.h> 4#include <asm/fpu/internal.h>
5#include <asm/tlbflush.h> 5#include <asm/tlbflush.h>
6 6
7#include <linux/sched.h>
8
7/* 9/*
8 * Initialize the TS bit in CR0 according to the style of context-switches 10 * Initialize the TS bit in CR0 according to the style of context-switches
9 * we are using: 11 * we are using:
@@ -38,7 +40,12 @@ static void fpu__init_cpu_generic(void)
38 write_cr0(cr0); 40 write_cr0(cr0);
39 41
40 /* Flush out any pending x87 state: */ 42 /* Flush out any pending x87 state: */
41 asm volatile ("fninit"); 43#ifdef CONFIG_MATH_EMULATION
44 if (!cpu_has_fpu)
45 fpstate_init_soft(&current->thread.fpu.state.soft);
46 else
47#endif
48 asm volatile ("fninit");
42} 49}
43 50
44/* 51/*
@@ -136,6 +143,43 @@ static void __init fpu__init_system_generic(void)
136unsigned int xstate_size; 143unsigned int xstate_size;
137EXPORT_SYMBOL_GPL(xstate_size); 144EXPORT_SYMBOL_GPL(xstate_size);
138 145
146/* Enforce that 'MEMBER' is the last field of 'TYPE': */
147#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
148 BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
149
150/*
151 * We append the 'struct fpu' to the task_struct:
152 */
153static void __init fpu__init_task_struct_size(void)
154{
155 int task_size = sizeof(struct task_struct);
156
157 /*
158 * Subtract off the static size of the register state.
159 * It potentially has a bunch of padding.
160 */
161 task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
162
163 /*
164 * Add back the dynamically-calculated register state
165 * size.
166 */
167 task_size += xstate_size;
168
169 /*
170 * We dynamically size 'struct fpu', so we require that
171 * it be at the end of 'thread_struct' and that
172 * 'thread_struct' be at the end of 'task_struct'. If
173 * you hit a compile error here, check the structure to
174 * see if something got added to the end.
175 */
176 CHECK_MEMBER_AT_END_OF(struct fpu, state);
177 CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
178 CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
179
180 arch_task_struct_size = task_size;
181}
182
139/* 183/*
140 * Set up the xstate_size based on the legacy FPU context size. 184 * Set up the xstate_size based on the legacy FPU context size.
141 * 185 *
@@ -287,6 +331,7 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
287 fpu__init_system_generic(); 331 fpu__init_system_generic();
288 fpu__init_system_xstate_size_legacy(); 332 fpu__init_system_xstate_size_legacy();
289 fpu__init_system_xstate(); 333 fpu__init_system_xstate();
334 fpu__init_task_struct_size();
290 335
291 fpu__init_system_ctx_switch(); 336 fpu__init_system_ctx_switch();
292} 337}
@@ -311,9 +356,15 @@ static int __init x86_noxsave_setup(char *s)
311 356
312 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 357 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
313 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 358 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
359 setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
314 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 360 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
315 setup_clear_cpu_cap(X86_FEATURE_AVX); 361 setup_clear_cpu_cap(X86_FEATURE_AVX);
316 setup_clear_cpu_cap(X86_FEATURE_AVX2); 362 setup_clear_cpu_cap(X86_FEATURE_AVX2);
363 setup_clear_cpu_cap(X86_FEATURE_AVX512F);
364 setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
365 setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
366 setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
367 setup_clear_cpu_cap(X86_FEATURE_MPX);
317 368
318 return 1; 369 return 1;
319} 370}
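
fpu__init_task_struct_size() above only works because 'state' is the last member of struct fpu, 'fpu' the last member of thread_struct, and 'thread' the last member of task_struct, so the whole chain can be over-allocated by the runtime xstate size. The general trailing-member idea, sketched with a made-up structure ('struct blob') rather than the real task_struct layout:

/*
 * Sketch of the "dynamically sized trailing member" idea behind
 * fpu__init_task_struct_size().
 */
#include <stddef.h>

struct blob {
	int id;
	unsigned char data[];	/* real length only known at runtime */
};

static size_t blob_alloc_size(size_t data_len)
{
	/* allocate the fixed header plus however much 'data' really needs */
	return offsetof(struct blob, data) + data_len;
}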
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5a4668136e98..f129a9af6357 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
161 /* Kill off the identity-map trampoline */ 161 /* Kill off the identity-map trampoline */
162 reset_early_page_tables(); 162 reset_early_page_tables();
163 163
164 kasan_map_early_shadow(early_level4_pgt);
165
166 /* clear bss before set_intr_gate with early_idt_handler */
167 clear_bss(); 164 clear_bss();
168 165
166 clear_page(init_level4_pgt);
167
168 kasan_early_init();
169
169 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) 170 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
170 set_intr_gate(i, early_idt_handler_array[i]); 171 set_intr_gate(i, early_idt_handler_array[i]);
171 load_idt((const struct desc_ptr *)&idt_descr); 172 load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
177 */ 178 */
178 load_ucode_bsp(); 179 load_ucode_bsp();
179 180
180 clear_page(init_level4_pgt);
181 /* set init_level4_pgt kernel high mapping*/ 181 /* set init_level4_pgt kernel high mapping*/
182 init_level4_pgt[511] = early_level4_pgt[511]; 182 init_level4_pgt[511] = early_level4_pgt[511];
183 183
184 kasan_map_early_shadow(init_level4_pgt);
185
186 x86_64_start_reservations(real_mode_data); 184 x86_64_start_reservations(real_mode_data);
187} 185}
188 186
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e5c27f729a38..1d40ca8a73f2 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@ ENTRY(phys_base)
516 /* This must match the first entry in level2_kernel_pgt */ 516 /* This must match the first entry in level2_kernel_pgt */
517 .quad 0x0000000000000000 517 .quad 0x0000000000000000
518 518
519#ifdef CONFIG_KASAN
520#define FILL(VAL, COUNT) \
521 .rept (COUNT) ; \
522 .quad (VAL) ; \
523 .endr
524
525NEXT_PAGE(kasan_zero_pte)
526 FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
527NEXT_PAGE(kasan_zero_pmd)
528 FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
529NEXT_PAGE(kasan_zero_pud)
530 FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
531
532#undef FILL
533#endif
534
535
536#include "../../x86/xen/xen-head.S" 519#include "../../x86/xen/xen-head.S"
537 520
538 __PAGE_ALIGNED_BSS 521 __PAGE_ALIGNED_BSS
539NEXT_PAGE(empty_zero_page) 522NEXT_PAGE(empty_zero_page)
540 .skip PAGE_SIZE 523 .skip PAGE_SIZE
541 524
542#ifdef CONFIG_KASAN
543/*
544 * This page used as early shadow. We don't use empty_zero_page
545 * at early stages, stack instrumentation could write some garbage
546 * to this page.
547 * Latter we reuse it as zero shadow for large ranges of memory
548 * that allowed to access, but not instrumented by kasan
549 * (vmalloc/vmemmap ...).
550 */
551NEXT_PAGE(kasan_zero_page)
552 .skip PAGE_SIZE
553#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 88b366487b0e..c7dfe1be784e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void)
347 if (!desc) 347 if (!desc)
348 continue; 348 continue;
349 349
350 /*
351 * Protect against concurrent action removal,
352 * affinity changes etc.
353 */
354 raw_spin_lock(&desc->lock);
350 data = irq_desc_get_irq_data(desc); 355 data = irq_desc_get_irq_data(desc);
351 cpumask_copy(&affinity_new, data->affinity); 356 cpumask_copy(&affinity_new, data->affinity);
352 cpumask_clear_cpu(this_cpu, &affinity_new); 357 cpumask_clear_cpu(this_cpu, &affinity_new);
353 358
354 /* Do not count inactive or per-cpu irqs. */ 359 /* Do not count inactive or per-cpu irqs. */
355 if (!irq_has_action(irq) || irqd_is_per_cpu(data)) 360 if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
361 raw_spin_unlock(&desc->lock);
356 continue; 362 continue;
363 }
357 364
365 raw_spin_unlock(&desc->lock);
358 /* 366 /*
359 * A single irq may be mapped to multiple 367 * A single irq may be mapped to multiple
360 * cpu's vector_irq[] (for example IOAPIC cluster 368 * cpu's vector_irq[] (for example IOAPIC cluster
@@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void)
385 * vector. If the vector is marked in the used vectors 393 * vector. If the vector is marked in the used vectors
386 * bitmap or an irq is assigned to it, we don't count 394 * bitmap or an irq is assigned to it, we don't count
387 * it as available. 395 * it as available.
396 *
397 * As this is an inaccurate snapshot anyway, we can do
398 * this w/o holding vector_lock.
388 */ 399 */
389 for (vector = FIRST_EXTERNAL_VECTOR; 400 for (vector = FIRST_EXTERNAL_VECTOR;
390 vector < first_system_vector; vector++) { 401 vector < first_system_vector; vector++) {
@@ -486,6 +497,11 @@ void fixup_irqs(void)
486 */ 497 */
487 mdelay(1); 498 mdelay(1);
488 499
500 /*
501 * We can walk the vector array of this cpu without holding
502 * vector_lock because the cpu is already marked !online, so
503 * nothing else will touch it.
504 */
489 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 505 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
490 unsigned int irr; 506 unsigned int irr;
491 507
@@ -497,9 +513,9 @@ void fixup_irqs(void)
497 irq = __this_cpu_read(vector_irq[vector]); 513 irq = __this_cpu_read(vector_irq[vector]);
498 514
499 desc = irq_to_desc(irq); 515 desc = irq_to_desc(irq);
516 raw_spin_lock(&desc->lock);
500 data = irq_desc_get_irq_data(desc); 517 data = irq_desc_get_irq_data(desc);
501 chip = irq_data_get_irq_chip(data); 518 chip = irq_data_get_irq_chip(data);
502 raw_spin_lock(&desc->lock);
503 if (chip->irq_retrigger) { 519 if (chip->irq_retrigger) {
504 chip->irq_retrigger(data); 520 chip->irq_retrigger(data);
505 __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED); 521 __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/uaccess.h> 17#include <linux/uaccess.h>
17 18
@@ -20,82 +21,82 @@
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/syscalls.h> 22#include <asm/syscalls.h>
22 23
23#ifdef CONFIG_SMP 24/* context.lock is held for us, so we don't need any locking. */
24static void flush_ldt(void *current_mm) 25static void flush_ldt(void *current_mm)
25{ 26{
26 if (current->active_mm == current_mm) 27 mm_context_t *pc;
27 load_LDT(&current->active_mm->context); 28
29 if (current->active_mm != current_mm)
30 return;
31
32 pc = &current->active_mm->context;
33 set_ldt(pc->ldt->entries, pc->ldt->size);
28} 34}
29#endif
30 35
31static int alloc_ldt(mm_context_t *pc, int mincount, int reload) 36/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
37static struct ldt_struct *alloc_ldt_struct(int size)
32{ 38{
33 void *oldldt, *newldt; 39 struct ldt_struct *new_ldt;
34 int oldsize; 40 int alloc_size;
35 41
36 if (mincount <= pc->size) 42 if (size > LDT_ENTRIES)
37 return 0; 43 return NULL;
38 oldsize = pc->size; 44
39 mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & 45 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
40 (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); 46 if (!new_ldt)
41 if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) 47 return NULL;
42 newldt = vmalloc(mincount * LDT_ENTRY_SIZE); 48
49 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
50 alloc_size = size * LDT_ENTRY_SIZE;
51
52 /*
53 * Xen is very picky: it requires a page-aligned LDT that has no
54 * trailing nonzero bytes in any page that contains LDT descriptors.
55 * Keep it simple: zero the whole allocation and never allocate less
56 * than PAGE_SIZE.
57 */
58 if (alloc_size > PAGE_SIZE)
59 new_ldt->entries = vzalloc(alloc_size);
43 else 60 else
44 newldt = (void *)__get_free_page(GFP_KERNEL); 61 new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
45
46 if (!newldt)
47 return -ENOMEM;
48 62
49 if (oldsize) 63 if (!new_ldt->entries) {
50 memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); 64 kfree(new_ldt);
51 oldldt = pc->ldt; 65 return NULL;
52 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, 66 }
53 (mincount - oldsize) * LDT_ENTRY_SIZE);
54 67
55 paravirt_alloc_ldt(newldt, mincount); 68 new_ldt->size = size;
69 return new_ldt;
70}
56 71
57#ifdef CONFIG_X86_64 72/* After calling this, the LDT is immutable. */
58 /* CHECKME: Do we really need this ? */ 73static void finalize_ldt_struct(struct ldt_struct *ldt)
59 wmb(); 74{
60#endif 75 paravirt_alloc_ldt(ldt->entries, ldt->size);
61 pc->ldt = newldt;
62 wmb();
63 pc->size = mincount;
64 wmb();
65
66 if (reload) {
67#ifdef CONFIG_SMP
68 preempt_disable();
69 load_LDT(pc);
70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable();
74#else
75 load_LDT(pc);
76#endif
77 }
78 if (oldsize) {
79 paravirt_free_ldt(oldldt, oldsize);
80 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
81 vfree(oldldt);
82 else
83 put_page(virt_to_page(oldldt));
84 }
85 return 0;
86} 76}
87 77
88static inline int copy_ldt(mm_context_t *new, mm_context_t *old) 78/* context.lock is held */
79static void install_ldt(struct mm_struct *current_mm,
80 struct ldt_struct *ldt)
89{ 81{
90 int err = alloc_ldt(new, old->size, 0); 82 /* Synchronizes with lockless_dereference in load_mm_ldt. */
91 int i; 83 smp_store_release(&current_mm->context.ldt, ldt);
84
85 /* Activate the LDT for all CPUs using current_mm. */
86 on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
87}
92 88
93 if (err < 0) 89static void free_ldt_struct(struct ldt_struct *ldt)
94 return err; 90{
91 if (likely(!ldt))
92 return;
95 93
96 for (i = 0; i < old->size; i++) 94 paravirt_free_ldt(ldt->entries, ldt->size);
97 write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); 95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
98 return 0; 96 vfree(ldt->entries);
97 else
98 kfree(ldt->entries);
99 kfree(ldt);
99} 100}
100 101
101/* 102/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
104 */ 105 */
105int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 106int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
106{ 107{
108 struct ldt_struct *new_ldt;
107 struct mm_struct *old_mm; 109 struct mm_struct *old_mm;
108 int retval = 0; 110 int retval = 0;
109 111
110 mutex_init(&mm->context.lock); 112 mutex_init(&mm->context.lock);
111 mm->context.size = 0;
112 old_mm = current->mm; 113 old_mm = current->mm;
113 if (old_mm && old_mm->context.size > 0) { 114 if (!old_mm) {
114 mutex_lock(&old_mm->context.lock); 115 mm->context.ldt = NULL;
115 retval = copy_ldt(&mm->context, &old_mm->context); 116 return 0;
116 mutex_unlock(&old_mm->context.lock);
117 } 117 }
118
119 mutex_lock(&old_mm->context.lock);
120 if (!old_mm->context.ldt) {
121 mm->context.ldt = NULL;
122 goto out_unlock;
123 }
124
125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
126 if (!new_ldt) {
127 retval = -ENOMEM;
128 goto out_unlock;
129 }
130
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
132 new_ldt->size * LDT_ENTRY_SIZE);
133 finalize_ldt_struct(new_ldt);
134
135 mm->context.ldt = new_ldt;
136
137out_unlock:
138 mutex_unlock(&old_mm->context.lock);
118 return retval; 139 return retval;
119} 140}
120 141
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
125 */ 146 */
126void destroy_context(struct mm_struct *mm) 147void destroy_context(struct mm_struct *mm)
127{ 148{
128 if (mm->context.size) { 149 free_ldt_struct(mm->context.ldt);
129#ifdef CONFIG_X86_32 150 mm->context.ldt = NULL;
130 /* CHECKME: Can this ever happen ? */
131 if (mm == current->active_mm)
132 clear_LDT();
133#endif
134 paravirt_free_ldt(mm->context.ldt, mm->context.size);
135 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
136 vfree(mm->context.ldt);
137 else
138 put_page(virt_to_page(mm->context.ldt));
139 mm->context.size = 0;
140 }
141} 151}
142 152
143static int read_ldt(void __user *ptr, unsigned long bytecount) 153static int read_ldt(void __user *ptr, unsigned long bytecount)
144{ 154{
145 int err; 155 int retval;
146 unsigned long size; 156 unsigned long size;
147 struct mm_struct *mm = current->mm; 157 struct mm_struct *mm = current->mm;
148 158
149 if (!mm->context.size) 159 mutex_lock(&mm->context.lock);
150 return 0; 160
161 if (!mm->context.ldt) {
162 retval = 0;
163 goto out_unlock;
164 }
165
151 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 166 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
152 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 167 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
153 168
154 mutex_lock(&mm->context.lock); 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE;
155 size = mm->context.size * LDT_ENTRY_SIZE;
156 if (size > bytecount) 170 if (size > bytecount)
157 size = bytecount; 171 size = bytecount;
158 172
159 err = 0; 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
160 if (copy_to_user(ptr, mm->context.ldt, size)) 174 retval = -EFAULT;
161 err = -EFAULT; 175 goto out_unlock;
162 mutex_unlock(&mm->context.lock); 176 }
163 if (err < 0) 177
164 goto error_return;
165 if (size != bytecount) { 178 if (size != bytecount) {
166 /* zero-fill the rest */ 179 /* Zero-fill the rest and pretend we read bytecount bytes. */
167 if (clear_user(ptr + size, bytecount - size) != 0) { 180 if (clear_user(ptr + size, bytecount - size)) {
168 err = -EFAULT; 181 retval = -EFAULT;
169 goto error_return; 182 goto out_unlock;
170 } 183 }
171 } 184 }
172 return bytecount; 185 retval = bytecount;
173error_return: 186
174 return err; 187out_unlock:
188 mutex_unlock(&mm->context.lock);
189 return retval;
175} 190}
176 191
177static int read_default_ldt(void __user *ptr, unsigned long bytecount) 192static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
195 struct desc_struct ldt; 210 struct desc_struct ldt;
196 int error; 211 int error;
197 struct user_desc ldt_info; 212 struct user_desc ldt_info;
213 int oldsize, newsize;
214 struct ldt_struct *new_ldt, *old_ldt;
198 215
199 error = -EINVAL; 216 error = -EINVAL;
200 if (bytecount != sizeof(ldt_info)) 217 if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
213 goto out; 230 goto out;
214 } 231 }
215 232
216 mutex_lock(&mm->context.lock); 233 if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
217 if (ldt_info.entry_number >= mm->context.size) { 234 LDT_empty(&ldt_info)) {
218 error = alloc_ldt(&current->mm->context, 235 /* The user wants to clear the entry. */
219 ldt_info.entry_number + 1, 1); 236 memset(&ldt, 0, sizeof(ldt));
220 if (error < 0) 237 } else {
221 goto out_unlock; 238 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
222 } 239 error = -EINVAL;
223 240 goto out;
224 /* Allow LDTs to be cleared by the user. */
225 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
226 if (oldmode || LDT_empty(&ldt_info)) {
227 memset(&ldt, 0, sizeof(ldt));
228 goto install;
229 } 241 }
242
243 fill_ldt(&ldt, &ldt_info);
244 if (oldmode)
245 ldt.avl = 0;
230 } 246 }
231 247
232 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 248 mutex_lock(&mm->context.lock);
233 error = -EINVAL; 249
250 old_ldt = mm->context.ldt;
251 oldsize = old_ldt ? old_ldt->size : 0;
252 newsize = max((int)(ldt_info.entry_number + 1), oldsize);
253
254 error = -ENOMEM;
255 new_ldt = alloc_ldt_struct(newsize);
256 if (!new_ldt)
234 goto out_unlock; 257 goto out_unlock;
235 }
236 258
237 fill_ldt(&ldt, &ldt_info); 259 if (old_ldt)
238 if (oldmode) 260 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
239 ldt.avl = 0; 261 new_ldt->entries[ldt_info.entry_number] = ldt;
262 finalize_ldt_struct(new_ldt);
240 263
241 /* Install the new entry ... */ 264 install_ldt(mm, new_ldt);
242install: 265 free_ldt_struct(old_ldt);
243 write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
244 error = 0; 266 error = 0;
245 267
246out_unlock: 268out_unlock:
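The rewritten LDT code never modifies a live descriptor table in place: write_ldt() builds a complete copy under context.lock, fills in the changed entry, publishes the new pointer with a release store (paired with lockless_dereference() on the context-switch path), and frees the old table only after every CPU using the mm has reloaded its LDT. A minimal userspace sketch of that copy-and-publish pattern, with C11 atomics standing in for smp_store_release(); the struct and function names below are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

struct table {
	size_t size;
	unsigned long *entries;
};

static _Atomic(struct table *) current_table;

/* Reader path: one acquire load, then use the snapshot without any lock. */
unsigned long read_entry(size_t idx)
{
	struct table *t = atomic_load_explicit(&current_table, memory_order_acquire);

	return (t && idx < t->size) ? t->entries[idx] : 0;
}

/* Writer path (callers serialize with a mutex): copy, modify, publish, free old. */
int set_entry(size_t idx, unsigned long val)
{
	struct table *old = atomic_load_explicit(&current_table, memory_order_relaxed);
	size_t oldsize = old ? old->size : 0;
	size_t newsize = (idx + 1 > oldsize) ? idx + 1 : oldsize;
	struct table *new = malloc(sizeof(*new));

	if (!new)
		return -1;
	new->entries = calloc(newsize, sizeof(*new->entries));
	if (!new->entries) {
		free(new);
		return -1;
	}
	if (old)
		memcpy(new->entries, old->entries, oldsize * sizeof(*new->entries));
	new->entries[idx] = val;
	new->size = newsize;

	/* Release store pairs with the acquire load in read_entry(). */
	atomic_store_explicit(&current_table, new, memory_order_release);

	/*
	 * The kernel can free the old table right away because install_ldt()
	 * first makes every CPU reload via on_each_cpu_mask(); a pure
	 * userspace version would need RCU or refcounting before freeing.
	 */
	if (old) {
		free(old->entries);
		free(old);
	}
	return 0;
}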
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index c3e985d1751c..d05bd2e2ee91 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
408NOKPROBE_SYMBOL(default_do_nmi); 408NOKPROBE_SYMBOL(default_do_nmi);
409 409
410/* 410/*
 411 * NMIs can hit breakpoints which will cause it to lose its 411 * NMIs can page fault or hit breakpoints which will cause them to lose
 412 * NMI context with the CPU when the breakpoint does an iret. 412 * their NMI context with the CPU when the breakpoint or page fault does an IRET.
413 */ 413 *
 414#ifdef CONFIG_X86_32 414 * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
415/* 415 * NMI processing. On x86_64, the asm glue protects us from nested NMIs
416 * For i386, NMIs use the same stack as the kernel, and we can 416 * if the outer NMI came from kernel mode, but we can still nest if the
417 * add a workaround to the iret problem in C (preventing nested 417 * outer NMI came from user mode.
418 * NMIs if an NMI takes a trap). Simply have 3 states the NMI 418 *
419 * can be in: 419 * To handle these nested NMIs, we have three states:
420 * 420 *
421 * 1) not running 421 * 1) not running
422 * 2) executing 422 * 2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
430 * (Note, the latch is binary, thus multiple NMIs triggering, 430 * (Note, the latch is binary, thus multiple NMIs triggering,
431 * when one is running, are ignored. Only one NMI is restarted.) 431 * when one is running, are ignored. Only one NMI is restarted.)
432 * 432 *
433 * If an NMI hits a breakpoint that executes an iret, another 433 * If an NMI executes an iret, another NMI can preempt it. We do not
434 * NMI can preempt it. We do not want to allow this new NMI 434 * want to allow this new NMI to run, but we want to execute it when the
435 * to run, but we want to execute it when the first one finishes. 435 * first one finishes. We set the state to "latched", and the exit of
436 * We set the state to "latched", and the exit of the first NMI will 436 * the first NMI will perform a dec_return, if the result is zero
437 * perform a dec_return, if the result is zero (NOT_RUNNING), then 437 * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
438 * it will simply exit the NMI handler. If not, the dec_return 438 * dec_return would have set the state to NMI_EXECUTING (what we want it
439 * would have set the state to NMI_EXECUTING (what we want it to 439 * to be when we are running). In this case, we simply jump back to
440 * be when we are running). In this case, we simply jump back 440 * rerun the NMI handler again, and restart the 'latched' NMI.
441 * to rerun the NMI handler again, and restart the 'latched' NMI.
442 * 441 *
443 * No trap (breakpoint or page fault) should be hit before nmi_restart, 442 * No trap (breakpoint or page fault) should be hit before nmi_restart,
444 * thus there is no race between the first check of state for NOT_RUNNING 443 * thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
461static DEFINE_PER_CPU(enum nmi_states, nmi_state); 460static DEFINE_PER_CPU(enum nmi_states, nmi_state);
462static DEFINE_PER_CPU(unsigned long, nmi_cr2); 461static DEFINE_PER_CPU(unsigned long, nmi_cr2);
463 462
464#define nmi_nesting_preprocess(regs) \ 463#ifdef CONFIG_X86_64
465 do { \
466 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
467 this_cpu_write(nmi_state, NMI_LATCHED); \
468 return; \
469 } \
470 this_cpu_write(nmi_state, NMI_EXECUTING); \
471 this_cpu_write(nmi_cr2, read_cr2()); \
472 } while (0); \
473 nmi_restart:
474
475#define nmi_nesting_postprocess() \
476 do { \
477 if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
478 write_cr2(this_cpu_read(nmi_cr2)); \
479 if (this_cpu_dec_return(nmi_state)) \
480 goto nmi_restart; \
481 } while (0)
482#else /* x86_64 */
483/* 464/*
484 * In x86_64 things are a bit more difficult. This has the same problem 465 * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
485 * where an NMI hitting a breakpoint that calls iret will remove the 466 * some care, the inner breakpoint will clobber the outer breakpoint's
486 * NMI context, allowing a nested NMI to enter. What makes this more 467 * stack.
487 * difficult is that both NMIs and breakpoints have their own stack.
488 * When a new NMI or breakpoint is executed, the stack is set to a fixed
489 * point. If an NMI is nested, it will have its stack set at that same
490 * fixed address that the first NMI had, and will start corrupting the
491 * stack. This is handled in entry_64.S, but the same problem exists with
492 * the breakpoint stack.
493 * 468 *
494 * If a breakpoint is being processed, and the debug stack is being used, 469 * If a breakpoint is being processed, and the debug stack is being
495 * if an NMI comes in and also hits a breakpoint, the stack pointer 470 * used, if an NMI comes in and also hits a breakpoint, the stack
496 * will be set to the same fixed address as the breakpoint that was 471 * pointer will be set to the same fixed address as the breakpoint that
497 * interrupted, causing that stack to be corrupted. To handle this case, 472 * was interrupted, causing that stack to be corrupted. To handle this
498 * check if the stack that was interrupted is the debug stack, and if 473 * case, check if the stack that was interrupted is the debug stack, and
499 * so, change the IDT so that new breakpoints will use the current stack 474 * if so, change the IDT so that new breakpoints will use the current
500 * and not switch to the fixed address. On return of the NMI, switch back 475 * stack and not switch to the fixed address. On return of the NMI,
501 * to the original IDT. 476 * switch back to the original IDT.
502 */ 477 */
503static DEFINE_PER_CPU(int, update_debug_stack); 478static DEFINE_PER_CPU(int, update_debug_stack);
479#endif
504 480
505static inline void nmi_nesting_preprocess(struct pt_regs *regs) 481dotraplinkage notrace void
482do_nmi(struct pt_regs *regs, long error_code)
506{ 483{
484 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
485 this_cpu_write(nmi_state, NMI_LATCHED);
486 return;
487 }
488 this_cpu_write(nmi_state, NMI_EXECUTING);
489 this_cpu_write(nmi_cr2, read_cr2());
490nmi_restart:
491
492#ifdef CONFIG_X86_64
507 /* 493 /*
508 * If we interrupted a breakpoint, it is possible that 494 * If we interrupted a breakpoint, it is possible that
509 * the nmi handler will have breakpoints too. We need to 495 * the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
514 debug_stack_set_zero(); 500 debug_stack_set_zero();
515 this_cpu_write(update_debug_stack, 1); 501 this_cpu_write(update_debug_stack, 1);
516 } 502 }
517}
518
519static inline void nmi_nesting_postprocess(void)
520{
521 if (unlikely(this_cpu_read(update_debug_stack))) {
522 debug_stack_reset();
523 this_cpu_write(update_debug_stack, 0);
524 }
525}
526#endif 503#endif
527 504
528dotraplinkage notrace void
529do_nmi(struct pt_regs *regs, long error_code)
530{
531 nmi_nesting_preprocess(regs);
532
533 nmi_enter(); 505 nmi_enter();
534 506
535 inc_irq_stat(__nmi_count); 507 inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
539 511
540 nmi_exit(); 512 nmi_exit();
541 513
542 /* On i386, may loop back to preprocess */ 514#ifdef CONFIG_X86_64
543 nmi_nesting_postprocess(); 515 if (unlikely(this_cpu_read(update_debug_stack))) {
516 debug_stack_reset();
517 this_cpu_write(update_debug_stack, 0);
518 }
519#endif
520
521 if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
522 write_cr2(this_cpu_read(nmi_cr2));
523 if (this_cpu_dec_return(nmi_state))
524 goto nmi_restart;
544} 525}
545NOKPROBE_SYMBOL(do_nmi); 526NOKPROBE_SYMBOL(do_nmi);
546 527
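The three states above map onto a small per-CPU counter, and the dec_return at the end of do_nmi() is what turns a latched NMI into a replay: LATCHED decrements to EXECUTING and the handler loops, EXECUTING decrements to NOT_RUNNING and the handler exits. A hedged single-CPU sketch of that state machine; handle_one_nmi() is a stand-in for the real work, and the CR2 save/restore and debug-stack handling are omitted:

enum nmi_state { NMI_NOT_RUNNING = 0, NMI_EXECUTING = 1, NMI_LATCHED = 2 };

static int nmi_state = NMI_NOT_RUNNING;	/* per-CPU variable in the kernel */
static int nmis_handled;

static void handle_one_nmi(void)
{
	nmis_handled++;				/* stands in for the real handler */
}

void do_nmi_model(void)
{
	if (nmi_state != NMI_NOT_RUNNING) {
		/* Nested NMI: latch it and return; the outer invocation replays it. */
		nmi_state = NMI_LATCHED;
		return;
	}
	nmi_state = NMI_EXECUTING;

restart:
	handle_one_nmi();

	/*
	 * dec_return semantics: LATCHED (2) drops to EXECUTING (1) and we loop
	 * to run the latched NMI; EXECUTING (1) drops to NOT_RUNNING (0) and
	 * we are done.
	 */
	if (--nmi_state)
		goto restart;
}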
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9cad694ed7c4..c27cad726765 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
81 */ 81 */
82int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 82int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
83{ 83{
84 *dst = *src; 84 memcpy(dst, src, arch_task_struct_size);
85 85
86 return fpu__copy(&dst->thread.fpu, &src->thread.fpu); 86 return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
87} 87}
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
408static void mwait_idle(void) 408static void mwait_idle(void)
409{ 409{
410 if (!current_set_polling_and_test()) { 410 if (!current_set_polling_and_test()) {
411 trace_cpu_idle_rcuidle(1, smp_processor_id());
411 if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { 412 if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
412 smp_mb(); /* quirk */ 413 smp_mb(); /* quirk */
413 clflush((void *)&current_thread_info()->flags); 414 clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
419 __sti_mwait(0, 0); 420 __sti_mwait(0, 0);
420 else 421 else
421 local_irq_enable(); 422 local_irq_enable();
423 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
422 } else { 424 } else {
423 local_irq_enable(); 425 local_irq_enable();
424 } 426 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
121void release_thread(struct task_struct *dead_task) 121void release_thread(struct task_struct *dead_task)
122{ 122{
123 if (dead_task->mm) { 123 if (dead_task->mm) {
124 if (dead_task->mm->context.size) { 124 if (dead_task->mm->context.ldt) {
125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
126 dead_task->comm, 126 dead_task->comm,
127 dead_task->mm->context.ldt, 127 dead_task->mm->context.ldt,
128 dead_task->mm->context.size); 128 dead_task->mm->context.ldt->size);
129 BUG(); 129 BUG();
130 } 130 }
131 } 131 }
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
93 COPY(r15); 93 COPY(r15);
94#endif /* CONFIG_X86_64 */ 94#endif /* CONFIG_X86_64 */
95 95
96#ifdef CONFIG_X86_32
96 COPY_SEG_CPL3(cs); 97 COPY_SEG_CPL3(cs);
97 COPY_SEG_CPL3(ss); 98 COPY_SEG_CPL3(ss);
99#else /* !CONFIG_X86_32 */
 100 /* The kernel saves and restores only the CS segment register on signals,
 101 * which is the bare minimum needed to allow mixed 32/64-bit code.
 102 * The app's signal handler can save/restore other segments if needed. */
103 COPY_SEG_CPL3(cs);
104#endif /* CONFIG_X86_32 */
98 105
99 get_user_ex(tmpflags, &sc->flags); 106 get_user_ex(tmpflags, &sc->flags);
100 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 107 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
154#else /* !CONFIG_X86_32 */ 161#else /* !CONFIG_X86_32 */
155 put_user_ex(regs->flags, &sc->flags); 162 put_user_ex(regs->flags, &sc->flags);
156 put_user_ex(regs->cs, &sc->cs); 163 put_user_ex(regs->cs, &sc->cs);
157 put_user_ex(0, &sc->__pad2); 164 put_user_ex(0, &sc->gs);
158 put_user_ex(0, &sc->__pad1); 165 put_user_ex(0, &sc->fs);
159 put_user_ex(regs->ss, &sc->ss);
160#endif /* CONFIG_X86_32 */ 166#endif /* CONFIG_X86_32 */
161 167
162 put_user_ex(fpstate, &sc->fpstate); 168 put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
451 457
452 regs->sp = (unsigned long)frame; 458 regs->sp = (unsigned long)frame;
453 459
454 /* 460 /* Set up the CS register to run signal handlers in 64-bit mode,
455 * Set up the CS and SS registers to run signal handlers in 461 even if the handler happens to be interrupting 32-bit code. */
456 * 64-bit mode, even if the handler happens to be interrupting
457 * 32-bit or 16-bit code.
458 *
459 * SS is subtle. In 64-bit mode, we don't need any particular
460 * SS descriptor, but we do need SS to be valid. It's possible
461 * that the old SS is entirely bogus -- this can happen if the
462 * signal we're trying to deliver is #GP or #SS caused by a bad
463 * SS value.
464 */
465 regs->cs = __USER_CS; 462 regs->cs = __USER_CS;
466 regs->ss = __USER_DS;
467 463
468 return 0; 464 return 0;
469} 465}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8add66b22f33..b1f3ed9c7a9e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -171,11 +171,6 @@ static void smp_callin(void)
171 apic_ap_setup(); 171 apic_ap_setup();
172 172
173 /* 173 /*
174 * Need to setup vector mappings before we enable interrupts.
175 */
176 setup_vector_irq(smp_processor_id());
177
178 /*
179 * Save our processor parameters. Note: this information 174 * Save our processor parameters. Note: this information
180 * is needed for clock calibration. 175 * is needed for clock calibration.
181 */ 176 */
@@ -239,18 +234,13 @@ static void notrace start_secondary(void *unused)
239 check_tsc_sync_target(); 234 check_tsc_sync_target();
240 235
241 /* 236 /*
242 * Enable the espfix hack for this CPU 237 * Lock vector_lock and initialize the vectors on this cpu
243 */ 238 * before setting the cpu online. We must set it online with
244#ifdef CONFIG_X86_ESPFIX64 239 * vector_lock held to prevent a concurrent setup/teardown
245 init_espfix_ap(); 240 * from seeing a half valid vector space.
246#endif
247
248 /*
249 * We need to hold vector_lock so there the set of online cpus
250 * does not change while we are assigning vectors to cpus. Holding
251 * this lock ensures we don't half assign or remove an irq from a cpu.
252 */ 241 */
253 lock_vector_lock(); 242 lock_vector_lock();
243 setup_vector_irq(smp_processor_id());
254 set_cpu_online(smp_processor_id(), true); 244 set_cpu_online(smp_processor_id(), true);
255 unlock_vector_lock(); 245 unlock_vector_lock();
256 cpu_set_state_online(smp_processor_id()); 246 cpu_set_state_online(smp_processor_id());
@@ -854,6 +844,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
854 initial_code = (unsigned long)start_secondary; 844 initial_code = (unsigned long)start_secondary;
855 stack_start = idle->thread.sp; 845 stack_start = idle->thread.sp;
856 846
847 /*
848 * Enable the espfix hack for this CPU
849 */
850#ifdef CONFIG_X86_ESPFIX64
851 init_espfix_ap(cpu);
852#endif
853
857 /* So we see what's up */ 854 /* So we see what's up */
858 announce_cpu(cpu, apicid); 855 announce_cpu(cpu, apicid);
859 856
@@ -995,8 +992,17 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
995 992
996 common_cpu_up(cpu, tidle); 993 common_cpu_up(cpu, tidle);
997 994
995 /*
996 * We have to walk the irq descriptors to setup the vector
997 * space for the cpu which comes online. Prevent irq
998 * alloc/free across the bringup.
999 */
1000 irq_lock_sparse();
1001
998 err = do_boot_cpu(apicid, cpu, tidle); 1002 err = do_boot_cpu(apicid, cpu, tidle);
1003
999 if (err) { 1004 if (err) {
1005 irq_unlock_sparse();
1000 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); 1006 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
1001 return -EIO; 1007 return -EIO;
1002 } 1008 }
@@ -1014,6 +1020,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
1014 touch_nmi_watchdog(); 1020 touch_nmi_watchdog();
1015 } 1021 }
1016 1022
1023 irq_unlock_sparse();
1024
1017 return 0; 1025 return 0;
1018} 1026}
1019 1027
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..0ccb53a9fcd9 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <asm/desc.h> 7#include <asm/desc.h>
8#include <asm/mmu_context.h>
8 9
9unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) 10unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
10{ 11{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27 struct desc_struct *desc; 28 struct desc_struct *desc;
28 unsigned long base; 29 unsigned long base;
29 30
30 seg &= ~7UL; 31 seg >>= 3;
31 32
32 mutex_lock(&child->mm->context.lock); 33 mutex_lock(&child->mm->context.lock);
33 if (unlikely((seg >> 3) >= child->mm->context.size)) 34 if (unlikely(!child->mm->context.ldt ||
35 seg >= child->mm->context.ldt->size))
34 addr = -1L; /* bogus selector, access would fault */ 36 addr = -1L; /* bogus selector, access would fault */
35 else { 37 else {
36 desc = child->mm->context.ldt + seg; 38 desc = &child->mm->context.ldt->entries[seg];
37 base = get_desc_base(desc); 39 base = get_desc_base(desc);
38 40
39 /* 16-bit code segment? */ 41 /* 16-bit code segment? */
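The selector handling in convert_ip_to_linear() changes from masking the low three bits to shifting them away because an x86 segment selector packs the descriptor index in bits 15:3, the table-indicator bit (GDT vs. LDT) in bit 2 and the requested privilege level in bits 1:0; the index is what is needed once the LDT is an array of entries rather than a raw byte blob. A small standalone sketch of that decoding:

#include <stdio.h>

struct selector_fields {
	unsigned index;	/* descriptor index within the GDT or LDT */
	unsigned ti;	/* table indicator: 0 = GDT, 1 = LDT */
	unsigned rpl;	/* requested privilege level */
};

static struct selector_fields decode_selector(unsigned short sel)
{
	struct selector_fields f = {
		.index = sel >> 3,
		.ti    = (sel >> 2) & 1,
		.rpl   = sel & 3,
	};
	return f;
}

int main(void)
{
	/* 0x2b is the usual 64-bit Linux __USER_DS: GDT entry 5, RPL 3. */
	struct selector_fields f = decode_selector(0x2b);

	printf("index=%u ti=%u rpl=%u\n", f.index, f.ti, f.rpl);
	return 0;
}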
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 505449700e0c..7437b41f6a47 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -598,10 +598,19 @@ static unsigned long quick_pit_calibrate(void)
598 if (!pit_expect_msb(0xff-i, &delta, &d2)) 598 if (!pit_expect_msb(0xff-i, &delta, &d2))
599 break; 599 break;
600 600
601 delta -= tsc;
602
603 /*
604 * Extrapolate the error and fail fast if the error will
605 * never be below 500 ppm.
606 */
607 if (i == 1 &&
608 d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
609 return 0;
610
601 /* 611 /*
602 * Iterate until the error is less than 500 ppm 612 * Iterate until the error is less than 500 ppm
603 */ 613 */
604 delta -= tsc;
605 if (d1+d2 >= delta >> 11) 614 if (d1+d2 >= delta >> 11)
606 continue; 615 continue;
607 616
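The added early exit reuses the existing 500 ppm criterion (delta >> 11 is delta/2048, roughly 488 ppm): the per-readout uncertainty d1 + d2 does not shrink over the loop, while delta can grow by at most a factor of MAX_QUICK_PIT_ITERATIONS, so if the inequality already fails at that projected maximum the calibration can never converge and further iterations are pointless. A worked check of the inequality; the iteration count below is a placeholder, not the kernel's actual constant:

#include <stdbool.h>
#include <stdio.h>

#define MAX_QUICK_PIT_ITERATIONS 50	/* placeholder, not the kernel value */

/* true if the error can never drop below ~500 ppm, so calibration should abort */
static bool will_never_converge(unsigned long d1, unsigned long d2,
				unsigned long delta_first_iter)
{
	return d1 + d2 >= (delta_first_iter * MAX_QUICK_PIT_ITERATIONS) >> 11;
}

int main(void)
{
	/*
	 * With delta = 100000 TSC cycles per iteration, the best final bound
	 * is 100000 * 50 / 2048, about 2441 cycles of allowed uncertainty.
	 */
	printf("%d\n", will_never_converge(3000, 500, 100000)); /* 3500 >= 2441: abort */
	printf("%d\n", will_never_converge(1000, 500, 100000)); /* 1500 <  2441: keep going */
	return 0;
}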
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 64dd46793099..2fbea2544f24 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); 98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
99 99
100 vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); 100 vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
101 if (vcpu->arch.eager_fpu)
102 kvm_x86_ops->fpu_activate(vcpu);
101 103
102 /* 104 /*
103 * The existing code assumes virtual address is 48-bit in the canonical 105 * The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/iommu.c b/arch/x86/kvm/iommu.c
index 7dbced309ddb..5c520ebf6343 100644
--- a/arch/x86/kvm/iommu.c
+++ b/arch/x86/kvm/iommu.c
@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
200 goto out_unmap; 200 goto out_unmap;
201 } 201 }
202 202
203 kvm_arch_start_assignment(kvm);
203 pci_set_dev_assigned(pdev); 204 pci_set_dev_assigned(pdev);
204 205
205 dev_info(&pdev->dev, "kvm assign device\n"); 206 dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
224 iommu_detach_device(domain, &pdev->dev); 225 iommu_detach_device(domain, &pdev->dev);
225 226
226 pci_clear_dev_assigned(pdev); 227 pci_clear_dev_assigned(pdev);
228 kvm_arch_end_assignment(kvm);
227 229
228 dev_info(&pdev->dev, "kvm deassign device\n"); 230 dev_info(&pdev->dev, "kvm deassign device\n");
229 231
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1595 for (i = 0; i < APIC_LVT_NUM; i++) 1595 for (i = 0; i < APIC_LVT_NUM; i++)
1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1597 apic_update_lvtt(apic); 1597 apic_update_lvtt(apic);
1598 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) 1598 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1599 apic_set_reg(apic, APIC_LVT0, 1599 apic_set_reg(apic, APIC_LVT0,
1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); 1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f807496b62c2..44171462bd2a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2479 return 0; 2479 return 0;
2480} 2480}
2481 2481
2482static bool kvm_is_mmio_pfn(pfn_t pfn)
2483{
2484 if (pfn_valid(pfn))
2485 return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
2486
2487 return true;
2488}
2489
2482static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, 2490static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2483 unsigned pte_access, int level, 2491 unsigned pte_access, int level,
2484 gfn_t gfn, pfn_t pfn, bool speculative, 2492 gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2506 spte |= PT_PAGE_SIZE_MASK; 2514 spte |= PT_PAGE_SIZE_MASK;
2507 if (tdp_enabled) 2515 if (tdp_enabled)
2508 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 2516 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2509 kvm_is_reserved_pfn(pfn)); 2517 kvm_is_mmio_pfn(pfn));
2510 2518
2511 if (host_writable) 2519 if (host_writable)
2512 spte |= SPTE_HOST_WRITEABLE; 2520 spte |= SPTE_HOST_WRITEABLE;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void)
124{
125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory.
129 */
130 return MTRR_TYPE_UNCACHABLE;
131}
132
123/* 133/*
124* Three terms are used in the following code: 134* Three terms are used in the following code:
125* - segment, it indicates the address segments covered by fixed MTRRs. 135* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
434 444
435 /* output fields. */ 445 /* output fields. */
436 int mem_type; 446 int mem_type;
447 /* mtrr is completely disabled? */
448 bool mtrr_disabled;
437 /* [start, end) is not fully covered in MTRRs? */ 449 /* [start, end) is not fully covered in MTRRs? */
438 bool partial_map; 450 bool partial_map;
439 451
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
549static void mtrr_lookup_start(struct mtrr_iter *iter) 561static void mtrr_lookup_start(struct mtrr_iter *iter)
550{ 562{
551 if (!mtrr_is_enabled(iter->mtrr_state)) { 563 if (!mtrr_is_enabled(iter->mtrr_state)) {
552 iter->partial_map = true; 564 iter->mtrr_disabled = true;
553 return; 565 return;
554 } 566 }
555 567
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
563 iter->mtrr_state = mtrr_state; 575 iter->mtrr_state = mtrr_state;
564 iter->start = start; 576 iter->start = start;
565 iter->end = end; 577 iter->end = end;
578 iter->mtrr_disabled = false;
566 iter->partial_map = false; 579 iter->partial_map = false;
567 iter->fixed = false; 580 iter->fixed = false;
568 iter->range = NULL; 581 iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
656 return MTRR_TYPE_WRBACK; 669 return MTRR_TYPE_WRBACK;
657 } 670 }
658 671
659 /* It is not covered by MTRRs. */ 672 if (iter.mtrr_disabled)
660 if (iter.partial_map) { 673 return mtrr_disabled_type();
661 /* 674
662 * We just check one page, partially covered by MTRRs is 675 /* not contained in any MTRRs. */
663 * impossible. 676 if (type == -1)
664 */ 677 return mtrr_default_type(mtrr_state);
665 WARN_ON(type != -1); 678
666 type = mtrr_default_type(mtrr_state); 679 /*
 667 } 680 * We only check one page, so partial coverage by MTRRs is
 681 * impossible.
682 */
683 WARN_ON(iter.partial_map);
684
668 return type; 685 return type;
669} 686}
670EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); 687EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
689 return false; 706 return false;
690 } 707 }
691 708
709 if (iter.mtrr_disabled)
710 return true;
711
692 if (!iter.partial_map) 712 if (!iter.partial_map)
693 return true; 713 return true;
694 714
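With the new mtrr_disabled flag, kvm_mtrr_get_guest_memory_type() resolves a page's type in a fixed order: MTRRs globally disabled yields UC (per the SDM), a covering fixed or variable range yields that range's type, and anything uncovered falls back to the default type from IA32_MTRR_DEF_TYPE. A hedged sketch of that decision order; the types and helper shape below are illustrative, not KVM's internal API:

enum mem_type { MT_UC = 0, MT_WC = 1, MT_WT = 4, MT_WP = 5, MT_WB = 6 };

struct mtrr_state_model {
	int enabled;				/* IA32_MTRR_DEF_TYPE.E */
	int default_type;			/* IA32_MTRR_DEF_TYPE.type */
	int (*lookup)(unsigned long gfn);	/* range type, or -1 if not covered */
};

int guest_memory_type(const struct mtrr_state_model *s, unsigned long gfn)
{
	int type;

	if (!s->enabled)
		return MT_UC;		/* SDM: all MTRRs disabled means UC everywhere */

	type = s->lookup(gfn);
	if (type == -1)
		return s->default_type;	/* not contained in any MTRR range */

	return type;
}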
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 602b974a60a6..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -865,6 +865,64 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
865 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); 865 set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
866} 866}
867 867
868#define MTRR_TYPE_UC_MINUS 7
869#define MTRR2PROTVAL_INVALID 0xff
870
871static u8 mtrr2protval[8];
872
873static u8 fallback_mtrr_type(int mtrr)
874{
875 /*
876 * WT and WP aren't always available in the host PAT. Treat
877 * them as UC and UC- respectively. Everything else should be
878 * there.
879 */
880 switch (mtrr)
881 {
882 case MTRR_TYPE_WRTHROUGH:
883 return MTRR_TYPE_UNCACHABLE;
884 case MTRR_TYPE_WRPROT:
885 return MTRR_TYPE_UC_MINUS;
886 default:
887 BUG();
888 }
889}
890
891static void build_mtrr2protval(void)
892{
893 int i;
894 u64 pat;
895
896 for (i = 0; i < 8; i++)
897 mtrr2protval[i] = MTRR2PROTVAL_INVALID;
898
899 /* Ignore the invalid MTRR types. */
900 mtrr2protval[2] = 0;
901 mtrr2protval[3] = 0;
902
903 /*
904 * Use host PAT value to figure out the mapping from guest MTRR
905 * values to nested page table PAT/PCD/PWT values. We do not
906 * want to change the host PAT value every time we enter the
907 * guest.
908 */
909 rdmsrl(MSR_IA32_CR_PAT, pat);
910 for (i = 0; i < 8; i++) {
911 u8 mtrr = pat >> (8 * i);
912
913 if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
914 mtrr2protval[mtrr] = __cm_idx2pte(i);
915 }
916
917 for (i = 0; i < 8; i++) {
918 if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
919 u8 fallback = fallback_mtrr_type(i);
920 mtrr2protval[i] = mtrr2protval[fallback];
921 BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
922 }
923 }
924}
925
868static __init int svm_hardware_setup(void) 926static __init int svm_hardware_setup(void)
869{ 927{
870 int cpu; 928 int cpu;
@@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void)
931 } else 989 } else
932 kvm_disable_tdp(); 990 kvm_disable_tdp();
933 991
992 build_mtrr2protval();
934 return 0; 993 return 0;
935 994
936err: 995err:
@@ -1085,6 +1144,39 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1085 return target_tsc - tsc; 1144 return target_tsc - tsc;
1086} 1145}
1087 1146
1147static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
1148{
1149 struct kvm_vcpu *vcpu = &svm->vcpu;
1150
1151 /* Unlike Intel, AMD takes the guest's CR0.CD into account.
1152 *
1153 * AMD does not have IPAT. To emulate it for the case of guests
1154 * with no assigned devices, just set everything to WB. If guests
1155 * have assigned devices, however, we cannot force WB for RAM
1156 * pages only, so use the guest PAT directly.
1157 */
1158 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1159 *g_pat = 0x0606060606060606;
1160 else
1161 *g_pat = vcpu->arch.pat;
1162}
1163
1164static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1165{
1166 u8 mtrr;
1167
1168 /*
1169 * 1. MMIO: trust guest MTRR, so same as item 3.
1170 * 2. No passthrough: always map as WB, and force guest PAT to WB as well
1171 * 3. Passthrough: can't guarantee the result, try to trust guest.
1172 */
1173 if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
1174 return 0;
1175
1176 mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
1177 return mtrr2protval[mtrr];
1178}
1179
1088static void init_vmcb(struct vcpu_svm *svm, bool init_event) 1180static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1089{ 1181{
1090 struct vmcb_control_area *control = &svm->vmcb->control; 1182 struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1180,6 +1272,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1180 clr_cr_intercept(svm, INTERCEPT_CR3_READ); 1272 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1181 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); 1273 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1182 save->g_pat = svm->vcpu.arch.pat; 1274 save->g_pat = svm->vcpu.arch.pat;
1275 svm_set_guest_pat(svm, &save->g_pat);
1183 save->cr3 = 0; 1276 save->cr3 = 0;
1184 save->cr4 = 0; 1277 save->cr4 = 0;
1185 } 1278 }
@@ -1579,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1579 * does not do it - this results in some delay at 1672 * does not do it - this results in some delay at
1580 * reboot 1673 * reboot
1581 */ 1674 */
1582 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) 1675 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1583 cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1584 svm->vmcb->save.cr0 = cr0; 1677 svm->vmcb->save.cr0 = cr0;
1585 mark_dirty(svm->vmcb, VMCB_CR); 1678 mark_dirty(svm->vmcb, VMCB_CR);
@@ -3254,6 +3347,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3254 case MSR_VM_IGNNE: 3347 case MSR_VM_IGNNE:
3255 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); 3348 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3256 break; 3349 break;
3350 case MSR_IA32_CR_PAT:
3351 if (npt_enabled) {
3352 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3353 return 1;
3354 vcpu->arch.pat = data;
3355 svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
3356 mark_dirty(svm->vmcb, VMCB_NPT);
3357 break;
3358 }
3359 /* fall through */
3257 default: 3360 default:
3258 return kvm_set_msr_common(vcpu, msr); 3361 return kvm_set_msr_common(vcpu, msr);
3259 } 3362 }
@@ -4088,11 +4191,6 @@ static bool svm_has_high_real_mode_segbase(void)
4088 return true; 4191 return true;
4089} 4192}
4090 4193
4091static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4092{
4093 return 0;
4094}
4095
4096static void svm_cpuid_update(struct kvm_vcpu *vcpu) 4194static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4097{ 4195{
4098} 4196}
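build_mtrr2protval() walks the host PAT MSR one byte at a time and records, for each memory type, the first PAT slot that already provides it; WT and WP fall back to UC and UC- when the host PAT lacks them, and the reserved MTRR types 2 and 3 are pointed at slot 0. A simplified sketch of the scan; it stores the PAT slot index rather than the page-table bits the kernel derives with __cm_idx2pte(), and the fallback step is left out for brevity:

#include <stdint.h>

#define PAT_ENTRIES 8
#define INVALID 0xff

/* mtrr_to_pat_index[mtrr_type] = first host PAT slot with that memory type */
static uint8_t mtrr_to_pat_index[PAT_ENTRIES];

void build_mapping(uint64_t host_pat)
{
	int i;

	for (i = 0; i < PAT_ENTRIES; i++)
		mtrr_to_pat_index[i] = INVALID;

	/* MTRR types 2 and 3 are reserved; map them to slot 0 so lookups are safe. */
	mtrr_to_pat_index[2] = 0;
	mtrr_to_pat_index[3] = 0;

	for (i = 0; i < PAT_ENTRIES; i++) {
		uint8_t type = (host_pat >> (8 * i)) & 0xff;

		if (type < PAT_ENTRIES && mtrr_to_pat_index[type] == INVALID)
			mtrr_to_pat_index[type] = (uint8_t)i;
	}
}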
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e856dd566f4c..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8632,22 +8632,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8632 u64 ipat = 0; 8632 u64 ipat = 0;
8633 8633
8634 /* For VT-d and EPT combination 8634 /* For VT-d and EPT combination
8635 * 1. MMIO: always map as UC 8635 * 1. MMIO: guest may want to apply WC, trust it.
8636 * 2. EPT with VT-d: 8636 * 2. EPT with VT-d:
8637 * a. VT-d without snooping control feature: can't guarantee the 8637 * a. VT-d without snooping control feature: can't guarantee the
8638 * result, try to trust guest. 8638 * result, try to trust guest. So the same as item 1.
8639 * b. VT-d with snooping control feature: snooping control feature of 8639 * b. VT-d with snooping control feature: snooping control feature of
8640 * VT-d engine can guarantee the cache correctness. Just set it 8640 * VT-d engine can guarantee the cache correctness. Just set it
8641 * to WB to keep consistent with host. So the same as item 3. 8641 * to WB to keep consistent with host. So the same as item 3.
8642 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep 8642 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
8643 * consistent with host MTRR 8643 * consistent with host MTRR
8644 */ 8644 */
8645 if (is_mmio) { 8645 if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
8646 cache = MTRR_TYPE_UNCACHABLE;
8647 goto exit;
8648 }
8649
8650 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
8651 ipat = VMX_EPT_IPAT_BIT; 8646 ipat = VMX_EPT_IPAT_BIT;
8652 cache = MTRR_TYPE_WRBACK; 8647 cache = MTRR_TYPE_WRBACK;
8653 goto exit; 8648 goto exit;
@@ -8655,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8655 8650
8656 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
8657 ipat = VMX_EPT_IPAT_BIT; 8652 ipat = VMX_EPT_IPAT_BIT;
8658 cache = MTRR_TYPE_UNCACHABLE; 8653 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
8654 cache = MTRR_TYPE_WRBACK;
8655 else
8656 cache = MTRR_TYPE_UNCACHABLE;
8659 goto exit; 8657 goto exit;
8660 } 8658 }
8661 8659
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bbaf44e8f0d3..8f0f6eca69da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2105 if (guest_cpuid_has_tsc_adjust(vcpu)) { 2105 if (guest_cpuid_has_tsc_adjust(vcpu)) {
2106 if (!msr_info->host_initiated) { 2106 if (!msr_info->host_initiated) {
2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2108 kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); 2108 adjust_tsc_offset_guest(vcpu, adj);
2109 } 2109 }
2110 vcpu->arch.ia32_tsc_adjust_msr = data; 2110 vcpu->arch.ia32_tsc_adjust_msr = data;
2111 } 2111 }
@@ -3157,8 +3157,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
3157 cpuid_count(XSTATE_CPUID, index, 3157 cpuid_count(XSTATE_CPUID, index,
3158 &size, &offset, &ecx, &edx); 3158 &size, &offset, &ecx, &edx);
3159 memcpy(dest, src + offset, size); 3159 memcpy(dest, src + offset, size);
3160 } else 3160 }
3161 WARN_ON_ONCE(1);
3162 3161
3163 valid -= feature; 3162 valid -= feature;
3164 } 3163 }
@@ -6328,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
6328static void process_smi(struct kvm_vcpu *vcpu) 6327static void process_smi(struct kvm_vcpu *vcpu)
6329{ 6328{
6330 struct kvm_segment cs, ds; 6329 struct kvm_segment cs, ds;
6330 struct desc_ptr dt;
6331 char buf[512]; 6331 char buf[512];
6332 u32 cr0; 6332 u32 cr0;
6333 6333
@@ -6360,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
6360 6360
6361 kvm_x86_ops->set_cr4(vcpu, 0); 6361 kvm_x86_ops->set_cr4(vcpu, 0);
6362 6362
6363 /* Undocumented: IDT limit is set to zero on entry to SMM. */
6364 dt.address = dt.size = 0;
6365 kvm_x86_ops->set_idt(vcpu, &dt);
6366
6363 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); 6367 __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
6364 6368
6365 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 6369 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
@@ -7315,11 +7319,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
7315 7319
7316 vcpu = kvm_x86_ops->vcpu_create(kvm, id); 7320 vcpu = kvm_x86_ops->vcpu_create(kvm, id);
7317 7321
7318 /*
7319 * Activate fpu unconditionally in case the guest needs eager FPU. It will be
7320 * deactivated soon if it doesn't.
7321 */
7322 kvm_x86_ops->fpu_activate(vcpu);
7323 return vcpu; 7322 return vcpu;
7324} 7323}
7325 7324
@@ -8218,6 +8217,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
8218 kvm_x86_ops->interrupt_allowed(vcpu); 8217 kvm_x86_ops->interrupt_allowed(vcpu);
8219} 8218}
8220 8219
8220void kvm_arch_start_assignment(struct kvm *kvm)
8221{
8222 atomic_inc(&kvm->arch.assigned_device_count);
8223}
8224EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
8225
8226void kvm_arch_end_assignment(struct kvm *kvm)
8227{
8228 atomic_dec(&kvm->arch.assigned_device_count);
8229}
8230EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
8231
8232bool kvm_arch_has_assigned_device(struct kvm *kvm)
8233{
8234 return atomic_read(&kvm->arch.assigned_device_count);
8235}
8236EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
8237
8221void kvm_arch_register_noncoherent_dma(struct kvm *kvm) 8238void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
8222{ 8239{
8223 atomic_inc(&kvm->arch.noncoherent_dma_count); 8240 atomic_inc(&kvm->arch.noncoherent_dma_count);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
147 return kvm_register_write(vcpu, reg, val); 147 return kvm_register_write(vcpu, reg, val);
148} 148}
149 149
150static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
151{
152 return !(kvm->arch.disabled_quirks & quirk);
153}
154
150void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 155void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
151void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 156void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
152void kvm_set_pending_timer(struct kvm_vcpu *vcpu); 157void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ddf9ecb53cc3..e342586db6e4 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
20 unsigned long ret; 20 unsigned long ret;
21 21
22 if (__range_not_ok(from, n, TASK_SIZE)) 22 if (__range_not_ok(from, n, TASK_SIZE))
23 return 0; 23 return n;
24 24
25 /* 25 /*
26 * Even though this function is typically called from NMI/IRQ context 26 * Even though this function is typically called from NMI/IRQ context
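The one-line change matters because copy_from_user_nmi() follows the copy_from_user() convention of returning the number of bytes *not* copied; returning 0 for a rejected range made callers believe the whole buffer had been read. A hedged caller-side sketch of that convention; the names are illustrative:

#include <stddef.h>
#include <string.h>

/*
 * Model of the convention: return the number of bytes NOT copied,
 * i.e. 0 on full success and n when the range is rejected outright.
 */
size_t copy_from_user_nmi_model(void *dst, const void *src, size_t n, int range_ok)
{
	if (!range_ok)
		return n;		/* the fixed behaviour: nothing was copied */
	memcpy(dst, src, n);
	return 0;
}

int read_sample(void *dst, const void *src, size_t n, int range_ok)
{
	size_t left = copy_from_user_nmi_model(dst, src, n, range_ok);

	if (left == n)
		return -1;		/* nothing copied: bad pointer or bad range */
	return (int)(n - left);		/* bytes actually copied */
}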
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84ab49f3..3d8f2e421466 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
29 29
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/traps.h> 31#include <asm/traps.h>
32#include <asm/desc.h>
33#include <asm/user.h> 32#include <asm/user.h>
34#include <asm/fpu/internal.h> 33#include <asm/fpu/internal.h>
35 34
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
181 math_abort(FPU_info, SIGILL); 180 math_abort(FPU_info, SIGILL);
182 } 181 }
183 182
184 code_descriptor = LDT_DESCRIPTOR(FPU_CS); 183 code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
185 if (SEG_D_SIZE(code_descriptor)) { 184 if (SEG_D_SIZE(code_descriptor)) {
186 /* The above test may be wrong, the book is not clear */ 185 /* The above test may be wrong, the book is not clear */
187 /* Segmented 32 bit protected mode */ 186 /* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb61a4fa..5e044d506b7a 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18 18
19/* s is always from a cpu register, and the cpu does bounds checking 19#include <asm/desc.h>
20 * during register load --> no further bounds checks needed */ 20#include <asm/mmu_context.h>
21#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) 21
22static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
23{
24 static struct desc_struct zero_desc;
25 struct desc_struct ret = zero_desc;
26
27#ifdef CONFIG_MODIFY_LDT_SYSCALL
28 seg >>= 3;
29 mutex_lock(&current->mm->context.lock);
30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
31 ret = current->mm->context.ldt->entries[seg];
32 mutex_unlock(&current->mm->context.lock);
33#endif
34 return ret;
35}
36
22#define SEG_D_SIZE(x) ((x).b & (3 << 21)) 37#define SEG_D_SIZE(x) ((x).b & (3 << 21))
23#define SEG_G_BIT(x) ((x).b & (1 << 23)) 38#define SEG_G_BIT(x) ((x).b & (1 << 23))
24#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) 39#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99380f9..8300db71c2a6 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
20#include <linux/stddef.h> 20#include <linux/stddef.h>
21 21
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/desc.h>
24 23
25#include "fpu_system.h" 24#include "fpu_system.h"
26#include "exception.h" 25#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
158 addr->selector = PM_REG_(segment); 157 addr->selector = PM_REG_(segment);
159 } 158 }
160 159
161 descriptor = LDT_DESCRIPTOR(PM_REG_(segment)); 160 descriptor = FPU_get_ldt_descriptor(addr->selector);
162 base_address = SEG_BASE_ADDR(descriptor); 161 base_address = SEG_BASE_ADDR(descriptor);
163 address = base_address + offset; 162 address = base_address + offset;
164 limit = base_address 163 limit = base_address
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
63 !PageReserved(pfn_to_page(start_pfn + i))) 63 !PageReserved(pfn_to_page(start_pfn + i)))
64 return 1; 64 return 1;
65 65
66 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
67
68 return 0; 66 return 0;
69} 67}
70 68
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
94 pgprot_t prot; 92 pgprot_t prot;
95 int retval; 93 int retval;
96 void __iomem *ret_addr; 94 void __iomem *ret_addr;
97 int ram_region;
98 95
99 /* Don't allow wraparound or zero size */ 96 /* Don't allow wraparound or zero size */
100 last_addr = phys_addr + size - 1; 97 last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
117 /* 114 /*
118 * Don't allow anybody to remap normal RAM that we're using.. 115 * Don't allow anybody to remap normal RAM that we're using..
119 */ 116 */
120 /* First check if whole region can be identified as RAM or not */ 117 pfn = phys_addr >> PAGE_SHIFT;
121 ram_region = region_is_ram(phys_addr, size); 118 last_pfn = last_addr >> PAGE_SHIFT;
122 if (ram_region > 0) { 119 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
123 WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", 120 __ioremap_check_ram) == 1) {
124 (unsigned long int)phys_addr, 121 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
125 (unsigned long int)last_addr); 122 &phys_addr, &last_addr);
126 return NULL; 123 return NULL;
127 } 124 }
128 125
129 /* If could not be identified(-1), check page by page */
130 if (ram_region < 0) {
131 pfn = phys_addr >> PAGE_SHIFT;
132 last_pfn = last_addr >> PAGE_SHIFT;
133 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
134 __ioremap_check_ram) == 1)
135 return NULL;
136 }
137 /* 126 /*
138 * Mappings have to be page-aligned 127 * Mappings have to be page-aligned
139 */ 128 */
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906c6b9f..e1840f3db5b5 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
1#define pr_fmt(fmt) "kasan: " fmt
1#include <linux/bootmem.h> 2#include <linux/bootmem.h>
2#include <linux/kasan.h> 3#include <linux/kasan.h>
3#include <linux/kdebug.h> 4#include <linux/kdebug.h>
@@ -11,7 +12,19 @@
11extern pgd_t early_level4_pgt[PTRS_PER_PGD]; 12extern pgd_t early_level4_pgt[PTRS_PER_PGD];
12extern struct range pfn_mapped[E820_X_MAX]; 13extern struct range pfn_mapped[E820_X_MAX];
13 14
14extern unsigned char kasan_zero_page[PAGE_SIZE]; 15static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
16static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
17static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
18
19/*
20 * This page used as early shadow. We don't use empty_zero_page
21 * at early stages, stack instrumentation could write some garbage
22 * to this page.
23 * Latter we reuse it as zero shadow for large ranges of memory
24 * that allowed to access, but not instrumented by kasan
25 * (vmalloc/vmemmap ...).
26 */
27static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
15 28
16static int __init map_range(struct range *range) 29static int __init map_range(struct range *range)
17{ 30{
@@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start,
36 pgd_clear(pgd_offset_k(start)); 49 pgd_clear(pgd_offset_k(start));
37} 50}
38 51
39void __init kasan_map_early_shadow(pgd_t *pgd) 52static void __init kasan_map_early_shadow(pgd_t *pgd)
40{ 53{
41 int i; 54 int i;
42 unsigned long start = KASAN_SHADOW_START; 55 unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
73 while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) { 86 while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
74 WARN_ON(!pmd_none(*pmd)); 87 WARN_ON(!pmd_none(*pmd));
75 set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte) 88 set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
76 | __PAGE_KERNEL_RO)); 89 | _KERNPG_TABLE));
77 addr += PMD_SIZE; 90 addr += PMD_SIZE;
78 pmd = pmd_offset(pud, addr); 91 pmd = pmd_offset(pud, addr);
79 } 92 }
@@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
99 while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) { 112 while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
100 WARN_ON(!pud_none(*pud)); 113 WARN_ON(!pud_none(*pud));
101 set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd) 114 set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
102 | __PAGE_KERNEL_RO)); 115 | _KERNPG_TABLE));
103 addr += PUD_SIZE; 116 addr += PUD_SIZE;
104 pud = pud_offset(pgd, addr); 117 pud = pud_offset(pgd, addr);
105 } 118 }
@@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
124 while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) { 137 while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
125 WARN_ON(!pgd_none(*pgd)); 138 WARN_ON(!pgd_none(*pgd));
126 set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud) 139 set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
127 | __PAGE_KERNEL_RO)); 140 | _KERNPG_TABLE));
128 addr += PGDIR_SIZE; 141 addr += PGDIR_SIZE;
129 pgd = pgd_offset_k(addr); 142 pgd = pgd_offset_k(addr);
130 } 143 }
@@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = {
166}; 179};
167#endif 180#endif
168 181
182void __init kasan_early_init(void)
183{
184 int i;
185 pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
186 pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
187 pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
188
189 for (i = 0; i < PTRS_PER_PTE; i++)
190 kasan_zero_pte[i] = __pte(pte_val);
191
192 for (i = 0; i < PTRS_PER_PMD; i++)
193 kasan_zero_pmd[i] = __pmd(pmd_val);
194
195 for (i = 0; i < PTRS_PER_PUD; i++)
196 kasan_zero_pud[i] = __pud(pud_val);
197
198 kasan_map_early_shadow(early_level4_pgt);
199 kasan_map_early_shadow(init_level4_pgt);
200}
201
169void __init kasan_init(void) 202void __init kasan_init(void)
170{ 203{
171 int i; 204 int i;
@@ -176,6 +209,7 @@ void __init kasan_init(void)
176 209
177 memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt)); 210 memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
178 load_cr3(early_level4_pgt); 211 load_cr3(early_level4_pgt);
212 __flush_tlb_all();
179 213
180 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); 214 clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
181 215
@@ -202,5 +236,8 @@ void __init kasan_init(void)
202 memset(kasan_zero_page, 0, PAGE_SIZE); 236 memset(kasan_zero_page, 0, PAGE_SIZE);
203 237
204 load_cr3(init_level4_pgt); 238 load_cr3(init_level4_pgt);
239 __flush_tlb_all();
205 init_task.kasan_depth = 0; 240 init_task.kasan_depth = 0;
241
242 pr_info("Kernel address sanitizer initialized\n");
206} 243}
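kasan_early_init() exists so that every kernel address has some shadow mapping from the first instructions onward; the real shadow is built later in kasan_init(). The shadow layout itself is a fixed arithmetic mapping: each 8-byte granule of address space is described by one shadow byte at a constant offset. A small sketch of that mapping; the offset below matches the x86_64 layout of this era, but treat it as illustrative:

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3			/* 8 bytes per shadow byte */
#define KASAN_SHADOW_OFFSET      0xdffffc0000000000UL	/* illustrative value */

static uintptr_t kasan_mem_to_shadow(uintptr_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uintptr_t addr = 0xffff880000000000UL;	/* start of the direct mapping */

	printf("shadow of %#lx is %#lx\n",
	       (unsigned long)addr, (unsigned long)kasan_mem_to_shadow(addr));
	return 0;
}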
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 126 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
127 } 127 }
128} 128}
129
130const char *arch_vma_name(struct vm_area_struct *vma)
131{
132 if (vma->vm_flags & VM_MPX)
133 return "[mpx]";
134 return NULL;
135}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
20#define CREATE_TRACE_POINTS 20#define CREATE_TRACE_POINTS
21#include <asm/trace/mpx.h> 21#include <asm/trace/mpx.h>
22 22
23static const char *mpx_mapping_name(struct vm_area_struct *vma)
24{
25 return "[mpx]";
26}
27
28static struct vm_operations_struct mpx_vma_ops = {
29 .name = mpx_mapping_name,
30};
31
32static int is_mpx_vma(struct vm_area_struct *vma)
33{
34 return (vma->vm_ops == &mpx_vma_ops);
35}
36
37static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) 23static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
38{ 24{
39 if (is_64bit_mm(mm)) 25 if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
53/* 39/*
54 * This is really a simplified "vm_mmap". it only handles MPX 40 * This is really a simplified "vm_mmap". it only handles MPX
55 * bounds tables (the bounds directory is user-allocated). 41 * bounds tables (the bounds directory is user-allocated).
56 *
57 * Later on, we use the vma->vm_ops to uniquely identify these
58 * VMAs.
59 */ 42 */
60static unsigned long mpx_mmap(unsigned long len) 43static unsigned long mpx_mmap(unsigned long len)
61{ 44{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
101 ret = -ENOMEM; 84 ret = -ENOMEM;
102 goto out; 85 goto out;
103 } 86 }
104 vma->vm_ops = &mpx_vma_ops;
105 87
106 if (vm_flags & VM_LOCKED) { 88 if (vm_flags & VM_LOCKED) {
107 up_write(&mm->mmap_sem); 89 up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
812 * so stop immediately and return an error. This 794 * so stop immediately and return an error. This
813 * probably results in a SIGSEGV. 795 * probably results in a SIGSEGV.
814 */ 796 */
815 if (!is_mpx_vma(vma)) 797 if (!(vma->vm_flags & VM_MPX))
816 return -EINVAL; 798 return -EINVAL;
817 799
818 len = min(vma->vm_end, end) - addr; 800 len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
945 * lots of tables even though we have no actual table 927 * lots of tables even though we have no actual table
946 * entries in use. 928 * entries in use.
947 */ 929 */
948 while (next && is_mpx_vma(next)) 930 while (next && (next->vm_flags & VM_MPX))
949 next = next->vm_next; 931 next = next->vm_next;
950 while (prev && is_mpx_vma(prev)) 932 while (prev && (prev->vm_flags & VM_MPX))
951 prev = prev->vm_prev; 933 prev = prev->vm_prev;
952 /* 934 /*
953 * We know 'start' and 'end' lie within an area controlled 935 * We know 'start' and 'end' lie within an area controlled
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
117 } else { 117 } else {
118 unsigned long addr; 118 unsigned long addr;
119 unsigned long nr_pages = 119 unsigned long nr_pages =
120 f->flush_end - f->flush_start / PAGE_SIZE; 120 (f->flush_end - f->flush_start) / PAGE_SIZE;
121 addr = f->flush_start; 121 addr = f->flush_start;
122 while (addr < f->flush_end) { 122 while (addr < f->flush_end) {
123 __flush_tlb_single(addr); 123 __flush_tlb_single(addr);
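The flush_tlb_func() fix is a pure operator-precedence bug: division binds tighter than subtraction, so the old expression computed flush_end - (flush_start / PAGE_SIZE) instead of the intended page count. A worked example with illustrative addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long flush_start = 0x10000;	/* illustrative addresses */
	unsigned long flush_end   = 0x14000;	/* four pages later */

	unsigned long wrong = flush_end - flush_start / PAGE_SIZE;	/* 0x14000 - 16 */
	unsigned long right = (flush_end - flush_start) / PAGE_SIZE;	/* 4 */

	printf("wrong=%lu right=%lu\n", wrong, right);
	return 0;
}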
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 579a8fd74be0..be2e7a2b10d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
270 offsetof(struct bpf_array, map.max_entries)); 270 offsetof(struct bpf_array, map.max_entries));
271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
272#define OFFSET1 44 /* number of bytes to jump */ 272#define OFFSET1 47 /* number of bytes to jump */
273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 273 EMIT2(X86_JBE, OFFSET1); /* jbe out */
274 label1 = cnt; 274 label1 = cnt;
275 275
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
278 */ 278 */
279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
281#define OFFSET2 33 281#define OFFSET2 36
282 EMIT2(X86_JA, OFFSET2); /* ja out */ 282 EMIT2(X86_JA, OFFSET2); /* ja out */
283 label2 = cnt; 283 label2 = cnt;
284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
286 286
287 /* prog = array->prog[index]; */ 287 /* prog = array->prog[index]; */
288 EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ 288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
289 EMIT1(offsetof(struct bpf_array, prog)); 289 offsetof(struct bpf_array, prog));
290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ 290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
291 291
292 /* if (prog == NULL) 292 /* if (prog == NULL)
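In the bpf_jit_comp.c hunks the lea that indexes array->prog grows from a 5-byte to an 8-byte encoding (EMIT4 plus EMIT1 becomes EMIT4_off32), so the hard-coded relative-jump constants OFFSET1 and OFFSET2 each grow by 3: a rel8 displacement must equal the exact number of bytes emitted between the end of the jump and its target. The toy emitter below (my own, not the kernel's EMIT*() macros) shows how such a displacement is derived from the byte counter:

#include <stdio.h>
#include <stdint.h>

static uint8_t image[64];
static int cnt;

static void emit(const uint8_t *bytes, int len)
{
    for (int i = 0; i < len; i++)
        image[cnt++] = bytes[i];
}

int main(void)
{
    emit((const uint8_t[]){ 0x76, 0x00 }, 2);        /* jbe rel8, patched below */
    int jump_end = cnt;                              /* rel8 is counted from here */

    emit((const uint8_t[]){ 0x90, 0x90, 0x90 }, 3);  /* body; growing it grows the jump */

    int target = cnt;
    image[jump_end - 1] = (uint8_t)(target - jump_end);   /* patch the displacement */

    printf("displacement = %d bytes\n", target - jump_end);
    return 0;
}

If the body between the jump and its label gains 3 bytes, the patched displacement, and hence the OFFSET constants in the JIT, must gain 3 as well.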
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
972 972
973static int __init arch_parse_efi_cmdline(char *str) 973static int __init arch_parse_efi_cmdline(char *str)
974{ 974{
975 if (!str) {
976 pr_warn("need at least one option\n");
977 return -EINVAL;
978 }
979
975 if (parse_option_str(str, "old_map")) 980 if (parse_option_str(str, "old_map"))
976 set_bit(EFI_OLD_MEMMAP, &efi.flags); 981 set_bit(EFI_OLD_MEMMAP, &efi.flags);
977 if (parse_option_str(str, "debug")) 982 if (parse_option_str(str, "debug"))
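arch_parse_efi_cmdline() can be called with a NULL string when "efi" appears on the command line with no argument, so the new hunk rejects that case before handing the pointer to parse_option_str(). A hedged sketch of the same guard, using strstr() as a stand-in for parse_option_str():

#include <stdio.h>
#include <string.h>

static int has_option(const char *str, const char *opt)   /* stand-in helper */
{
    return strstr(str, opt) != NULL;
}

static int parse_efi_cmdline(const char *str)
{
    if (!str) {
        fprintf(stderr, "need at least one option\n");
        return -1;                      /* -EINVAL in the kernel */
    }
    if (has_option(str, "old_map"))
        printf("old_map selected\n");
    if (has_option(str, "debug"))
        printf("debug selected\n");
    return 0;
}

int main(void)
{
    parse_efi_cmdline(NULL);            /* rejected instead of dereferenced */
    parse_efi_cmdline("old_map,debug");
    return 0;
}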
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
22#include <asm/fpu/internal.h> 22#include <asm/fpu/internal.h>
23#include <asm/debugreg.h> 23#include <asm/debugreg.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/mmu_context.h>
25 26
26#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
27__visible unsigned long saved_context_ebx; 28__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
153 syscall_init(); /* This sets MSR_*STAR and related */ 154 syscall_init(); /* This sets MSR_*STAR and related */
154#endif 155#endif
155 load_TR_desc(); /* This does ltr */ 156 load_TR_desc(); /* This does ltr */
156 load_LDT(&current->active_mm->context); /* This does lldt */ 157 load_mm_ldt(current->active_mm); /* This does lldt */
157 158
158 fpu__resume_cpu(); 159 fpu__resume_cpu();
159} 160}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda867a33..484145368a24 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
8 select PARAVIRT_CLOCK 8 select PARAVIRT_CLOCK
9 select XEN_HAVE_PVMMU 9 select XEN_HAVE_PVMMU
10 depends on X86_64 || (X86_32 && X86_PAE) 10 depends on X86_64 || (X86_32 && X86_PAE)
11 depends on X86_TSC 11 depends on X86_LOCAL_APIC && X86_TSC
12 help 12 help
13 This is the Linux Xen port. Enabling this will allow the 13 This is the Linux Xen port. Enabling this will allow the
14 kernel to boot in a paravirtualized environment under the 14 kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
17config XEN_DOM0 17config XEN_DOM0
18 def_bool y 18 def_bool y
19 depends on XEN && PCI_XEN && SWIOTLB_XEN 19 depends on XEN && PCI_XEN && SWIOTLB_XEN
20 depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI 20 depends on X86_IO_APIC && ACPI && PCI
21 21
22config XEN_PVHVM 22config XEN_PVHVM
23 def_bool y 23 def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
14 time.o xen-asm.o xen-asm_$(BITS).o \ 14 time.o xen-asm.o xen-asm_$(BITS).o \
15 grant-table.o suspend.o platform-pci-unplug.o \ 15 grant-table.o suspend.o platform-pci-unplug.o \
16 p2m.o 16 p2m.o apic.o
17 17
18obj-$(CONFIG_EVENT_TRACING) += trace.o 18obj-$(CONFIG_EVENT_TRACING) += trace.o
19 19
20obj-$(CONFIG_SMP) += smp.o 20obj-$(CONFIG_SMP) += smp.o
21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o 21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
22obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o 22obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
23obj-$(CONFIG_XEN_DOM0) += apic.o vga.o 23obj-$(CONFIG_XEN_DOM0) += vga.o
24obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o 24obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
25obj-$(CONFIG_XEN_EFI) += efi.o 25obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
483 pte_t pte; 483 pte_t pte;
484 unsigned long pfn; 484 unsigned long pfn;
485 struct page *page; 485 struct page *page;
486 unsigned char dummy;
486 487
487 ptep = lookup_address((unsigned long)v, &level); 488 ptep = lookup_address((unsigned long)v, &level);
488 BUG_ON(ptep == NULL); 489 BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
492 493
493 pte = pfn_pte(pfn, prot); 494 pte = pfn_pte(pfn, prot);
494 495
496 /*
497 * Careful: update_va_mapping() will fail if the virtual address
498 * we're poking isn't populated in the page tables. We don't
499 * need to worry about the direct map (that's always in the page
500 * tables), but we need to be careful about vmap space. In
501 * particular, the top level page table can lazily propagate
502 * entries between processes, so if we've switched mms since we
503 * vmapped the target in the first place, we might not have the
504 * top-level page table entry populated.
505 *
506 * We disable preemption because we want the same mm active when
507 * we probe the target and when we issue the hypercall. We'll
508 * have the same nominal mm, but if we're a kernel thread, lazy
509 * mm dropping could change our pgd.
510 *
511 * Out of an abundance of caution, this uses __get_user() to fault
512 * in the target address just in case there's some obscure case
513 * in which the target address isn't readable.
514 */
515
516 preempt_disable();
517
518 pagefault_disable(); /* Avoid warnings due to being atomic. */
519 __get_user(dummy, (unsigned char __user __force *)v);
520 pagefault_enable();
521
495 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 522 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
496 BUG(); 523 BUG();
497 524
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
503 BUG(); 530 BUG();
504 } else 531 } else
505 kmap_flush_unused(); 532 kmap_flush_unused();
533
534 preempt_enable();
506} 535}
507 536
508static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 537static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
510 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 539 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
511 int i; 540 int i;
512 541
542 /*
543 * We need to mark the all aliases of the LDT pages RO. We
544 * don't need to call vm_flush_aliases(), though, since that's
545 * only responsible for flushing aliases out the TLBs, not the
546 * page tables, and Xen will flush the TLB for us if needed.
547 *
548 * To avoid confusing future readers: none of this is necessary
549 * to load the LDT. The hypervisor only checks this when the
550 * LDT is faulted in due to subsequent descriptor access.
551 */
552
513 for(i = 0; i < entries; i += entries_per_page) 553 for(i = 0; i < entries; i += entries_per_page)
514 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 554 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
515} 555}
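The comment added to set_aliased_prot() explains the pattern: the hypercall fails if the vmalloc'd address is not populated in the current page tables, so the code pins the mm by disabling preemption and uses a throwaway __get_user() read to fault the page in first. A rough userspace analogue (only an analogy; there is no preemption or hypercall here) is touching a freshly mapped page before a section that must not take a fault:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096;
    unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    /* analogous to the __get_user() probe: a discarded read forces the page
     * to be populated before the work that must not fault */
    volatile unsigned char dummy = p[0];
    (void)dummy;

    memset(p, 0xAA, len);               /* "critical" work on a populated page */
    printf("first byte now 0x%02x\n", p[0]);

    munmap(p, len);
    return 0;
}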
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
101 101
102#ifdef CONFIG_XEN_DOM0 102#ifdef CONFIG_XEN_DOM0
103void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); 103void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
104void __init xen_init_apic(void);
105#else 104#else
106static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, 105static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
107 size_t size) 106 size_t size)
108{ 107{
109} 108}
110static inline void __init xen_init_apic(void)
111{
112}
113#endif 109#endif
114 110
111void __init xen_init_apic(void);
112
115#ifdef CONFIG_XEN_EFI 113#ifdef CONFIG_XEN_EFI
116extern void xen_efi_init(void); 114extern void xen_efi_init(void);
117#else 115#else
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 14d15bf1a95b..5b478accd5fc 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += linkage.h
19generic-y += local.h 19generic-y += local.h
20generic-y += local64.h 20generic-y += local64.h
21generic-y += mcs_spinlock.h 21generic-y += mcs_spinlock.h
22generic-y += mm-arch-hooks.h
22generic-y += percpu.h 23generic-y += percpu.h
23generic-y += preempt.h 24generic-y += preempt.h
24generic-y += resource.h 25generic-y += resource.h
diff --git a/arch/xtensa/include/asm/mm-arch-hooks.h b/arch/xtensa/include/asm/mm-arch-hooks.h
deleted file mode 100644
index d2e5cfd3dd02..000000000000
--- a/arch/xtensa/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * Architecture specific mm hooks
3 *
4 * Copyright (C) 2015, IBM Corporation
5 * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _ASM_XTENSA_MM_ARCH_HOOKS_H
13#define _ASM_XTENSA_MM_ARCH_HOOKS_H
14
15#endif /* _ASM_XTENSA_MM_ARCH_HOOKS_H */
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0436c21db7f2..719b7152aed1 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
51 unsigned long idx = BIO_POOL_NONE; 51 unsigned long idx = BIO_POOL_NONE;
52 unsigned inline_vecs; 52 unsigned inline_vecs;
53 53
54 if (!bs) { 54 if (!bs || !bs->bio_integrity_pool) {
55 bip = kmalloc(sizeof(struct bio_integrity_payload) + 55 bip = kmalloc(sizeof(struct bio_integrity_payload) +
56 sizeof(struct bio_vec) * nr_vecs, gfp_mask); 56 sizeof(struct bio_vec) * nr_vecs, gfp_mask);
57 inline_vecs = nr_vecs; 57 inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
104 kfree(page_address(bip->bip_vec->bv_page) + 104 kfree(page_address(bip->bip_vec->bv_page) +
105 bip->bip_vec->bv_offset); 105 bip->bip_vec->bv_offset);
106 106
107 if (bs) { 107 if (bs && bs->bio_integrity_pool) {
108 if (bip->bip_slab != BIO_POOL_NONE) 108 if (bip->bip_slab != BIO_POOL_NONE)
109 bvec_free(bs->bvec_integrity_pool, bip->bip_vec, 109 bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
110 bip->bip_slab); 110 bip->bip_slab);
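The bio-integrity.c hunks widen the slab fallback so it covers not just a missing bio_set but also a bio_set whose integrity mempool was never created, keeping allocation and free on matching branches in both cases. A toy version of that "use the pool only if it really exists" decision (the pool type here is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct pool { void *slots[4]; int used; };     /* invented stand-in for a mempool */

static void *alloc_payload(struct pool *p, size_t sz)
{
    if (!p)                              /* no pool set up: plain allocation */
        return malloc(sz);
    if (p->used < 4)
        return p->slots[p->used++] = malloc(sz);
    return NULL;
}

int main(void)
{
    struct pool pl = { .used = 0 };
    void *a = alloc_payload(NULL, 64);   /* fallback path */
    void *b = alloc_payload(&pl, 64);    /* pooled path */

    printf("fallback=%p pooled=%p\n", a, b);
    free(a);
    free(b);
    return 0;
}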
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..d6e5ba3399f0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
1831 * Allocates and returns a new bio which represents @sectors from the start of 1831 * Allocates and returns a new bio which represents @sectors from the start of
1832 * @bio, and updates @bio to represent the remaining sectors. 1832 * @bio, and updates @bio to represent the remaining sectors.
1833 * 1833 *
1834 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's 1834 * Unless this is a discard request the newly allocated bio will point
1835 * responsibility to ensure that @bio is not freed before the split. 1835 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1836 * @bio is not freed before the split.
1836 */ 1837 */
1837struct bio *bio_split(struct bio *bio, int sectors, 1838struct bio *bio_split(struct bio *bio, int sectors,
1838 gfp_t gfp, struct bio_set *bs) 1839 gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
1842 BUG_ON(sectors <= 0); 1843 BUG_ON(sectors <= 0);
1843 BUG_ON(sectors >= bio_sectors(bio)); 1844 BUG_ON(sectors >= bio_sectors(bio));
1844 1845
1845 split = bio_clone_fast(bio, gfp, bs); 1846 /*
1847 * Discards need a mutable bio_vec to accommodate the payload
1848 * required by the DSM TRIM and UNMAP commands.
1849 */
1850 if (bio->bi_rw & REQ_DISCARD)
1851 split = bio_clone_bioset(bio, gfp, bs);
1852 else
1853 split = bio_clone_fast(bio, gfp, bs);
1854
1846 if (!split) 1855 if (!split)
1847 return NULL; 1856 return NULL;
1848 1857
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
2009 bio->bi_css = blkcg_css; 2018 bio->bi_css = blkcg_css;
2010 return 0; 2019 return 0;
2011} 2020}
2021EXPORT_SYMBOL_GPL(bio_associate_blkcg);
2012 2022
2013/** 2023/**
2014 * bio_associate_current - associate a bio with %current 2024 * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
2039 bio->bi_css = task_get_css(current, blkio_cgrp_id); 2049 bio->bi_css = task_get_css(current, blkio_cgrp_id);
2040 return 0; 2050 return 0;
2041} 2051}
2052EXPORT_SYMBOL_GPL(bio_associate_current);
2042 2053
2043/** 2054/**
2044 * bio_disassociate_task - undo bio_associate_current() 2055 * bio_disassociate_task - undo bio_associate_current()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9f97da52d006..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -29,6 +29,14 @@
29 29
30#define MAX_KEY_LEN 100 30#define MAX_KEY_LEN 100
31 31
32/*
33 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
34 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
35 * policy [un]register operations including cgroup file additions /
36 * removals. Putting cgroup file registration outside blkcg_pol_mutex
37 * allows grabbing it from cgroup callbacks.
38 */
39static DEFINE_MUTEX(blkcg_pol_register_mutex);
32static DEFINE_MUTEX(blkcg_pol_mutex); 40static DEFINE_MUTEX(blkcg_pol_mutex);
33 41
34struct blkcg blkcg_root; 42struct blkcg blkcg_root;
@@ -38,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
38 46
39static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS]; 47static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
40 48
49static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
50
41static bool blkcg_policy_enabled(struct request_queue *q, 51static bool blkcg_policy_enabled(struct request_queue *q,
42 const struct blkcg_policy *pol) 52 const struct blkcg_policy *pol)
43{ 53{
@@ -453,20 +463,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
453 struct blkcg_gq *blkg; 463 struct blkcg_gq *blkg;
454 int i; 464 int i;
455 465
456 /* 466 mutex_lock(&blkcg_pol_mutex);
457 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
458 * which ends up putting cgroup's internal cgroup_tree_mutex under
459 * it; however, cgroup_tree_mutex is nested above cgroup file
460 * active protection and grabbing blkcg_pol_mutex from a cgroup
461 * file operation creates a possible circular dependency. cgroup
462 * internal locking is planned to go through further simplification
463 * and this issue should go away soon. For now, let's trylock
464 * blkcg_pol_mutex and restart the write on failure.
465 *
466 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
467 */
468 if (!mutex_trylock(&blkcg_pol_mutex))
469 return restart_syscall();
470 spin_lock_irq(&blkcg->lock); 467 spin_lock_irq(&blkcg->lock);
471 468
472 /* 469 /*
@@ -721,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
721 return -EINVAL; 718 return -EINVAL;
722 719
723 disk = get_gendisk(MKDEV(major, minor), &part); 720 disk = get_gendisk(MKDEV(major, minor), &part);
724 if (!disk || part) 721 if (!disk)
722 return -EINVAL;
723 if (part) {
724 put_disk(disk);
725 return -EINVAL; 725 return -EINVAL;
726 }
726 727
727 rcu_read_lock(); 728 rcu_read_lock();
728 spin_lock_irq(disk->queue->queue_lock); 729 spin_lock_irq(disk->queue->queue_lock);
@@ -822,8 +823,17 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
822{ 823{
823 struct blkcg *blkcg = css_to_blkcg(css); 824 struct blkcg *blkcg = css_to_blkcg(css);
824 825
825 if (blkcg != &blkcg_root) 826 mutex_lock(&blkcg_pol_mutex);
827 list_del(&blkcg->all_blkcgs_node);
828 mutex_unlock(&blkcg_pol_mutex);
829
830 if (blkcg != &blkcg_root) {
831 int i;
832
833 for (i = 0; i < BLKCG_MAX_POLS; i++)
834 kfree(blkcg->pd[i]);
826 kfree(blkcg); 835 kfree(blkcg);
836 }
827} 837}
828 838
829static struct cgroup_subsys_state * 839static struct cgroup_subsys_state *
@@ -833,6 +843,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
833 struct cgroup_subsys_state *ret; 843 struct cgroup_subsys_state *ret;
834 int i; 844 int i;
835 845
846 mutex_lock(&blkcg_pol_mutex);
847
836 if (!parent_css) { 848 if (!parent_css) {
837 blkcg = &blkcg_root; 849 blkcg = &blkcg_root;
838 goto done; 850 goto done;
@@ -875,14 +887,17 @@ done:
875#ifdef CONFIG_CGROUP_WRITEBACK 887#ifdef CONFIG_CGROUP_WRITEBACK
876 INIT_LIST_HEAD(&blkcg->cgwb_list); 888 INIT_LIST_HEAD(&blkcg->cgwb_list);
877#endif 889#endif
890 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
891
892 mutex_unlock(&blkcg_pol_mutex);
878 return &blkcg->css; 893 return &blkcg->css;
879 894
880free_pd_blkcg: 895free_pd_blkcg:
881 for (i--; i >= 0; i--) 896 for (i--; i >= 0; i--)
882 kfree(blkcg->pd[i]); 897 kfree(blkcg->pd[i]);
883
884free_blkcg: 898free_blkcg:
885 kfree(blkcg); 899 kfree(blkcg);
900 mutex_unlock(&blkcg_pol_mutex);
886 return ret; 901 return ret;
887} 902}
888 903
@@ -1037,10 +1052,8 @@ int blkcg_activate_policy(struct request_queue *q,
1037 const struct blkcg_policy *pol) 1052 const struct blkcg_policy *pol)
1038{ 1053{
1039 LIST_HEAD(pds); 1054 LIST_HEAD(pds);
1040 LIST_HEAD(cpds);
1041 struct blkcg_gq *blkg; 1055 struct blkcg_gq *blkg;
1042 struct blkg_policy_data *pd, *nd; 1056 struct blkg_policy_data *pd, *nd;
1043 struct blkcg_policy_data *cpd, *cnd;
1044 int cnt = 0, ret; 1057 int cnt = 0, ret;
1045 1058
1046 if (blkcg_policy_enabled(q, pol)) 1059 if (blkcg_policy_enabled(q, pol))
@@ -1053,10 +1066,7 @@ int blkcg_activate_policy(struct request_queue *q,
1053 cnt++; 1066 cnt++;
1054 spin_unlock_irq(q->queue_lock); 1067 spin_unlock_irq(q->queue_lock);
1055 1068
1056 /* 1069 /* allocate per-blkg policy data for all existing blkgs */
1057 * Allocate per-blkg and per-blkcg policy data
1058 * for all existing blkgs.
1059 */
1060 while (cnt--) { 1070 while (cnt--) {
1061 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node); 1071 pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
1062 if (!pd) { 1072 if (!pd) {
@@ -1064,15 +1074,6 @@ int blkcg_activate_policy(struct request_queue *q,
1064 goto out_free; 1074 goto out_free;
1065 } 1075 }
1066 list_add_tail(&pd->alloc_node, &pds); 1076 list_add_tail(&pd->alloc_node, &pds);
1067
1068 if (!pol->cpd_size)
1069 continue;
1070 cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
1071 if (!cpd) {
1072 ret = -ENOMEM;
1073 goto out_free;
1074 }
1075 list_add_tail(&cpd->alloc_node, &cpds);
1076 } 1077 }
1077 1078
1078 /* 1079 /*
@@ -1082,32 +1083,17 @@ int blkcg_activate_policy(struct request_queue *q,
1082 spin_lock_irq(q->queue_lock); 1083 spin_lock_irq(q->queue_lock);
1083 1084
1084 list_for_each_entry(blkg, &q->blkg_list, q_node) { 1085 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1085 if (WARN_ON(list_empty(&pds)) || 1086 if (WARN_ON(list_empty(&pds))) {
1086 WARN_ON(pol->cpd_size && list_empty(&cpds))) {
1087 /* umm... this shouldn't happen, just abort */ 1087 /* umm... this shouldn't happen, just abort */
1088 ret = -ENOMEM; 1088 ret = -ENOMEM;
1089 goto out_unlock; 1089 goto out_unlock;
1090 } 1090 }
1091 cpd = list_first_entry(&cpds, struct blkcg_policy_data,
1092 alloc_node);
1093 list_del_init(&cpd->alloc_node);
1094 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node); 1091 pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
1095 list_del_init(&pd->alloc_node); 1092 list_del_init(&pd->alloc_node);
1096 1093
1097 /* grab blkcg lock too while installing @pd on @blkg */ 1094 /* grab blkcg lock too while installing @pd on @blkg */
1098 spin_lock(&blkg->blkcg->lock); 1095 spin_lock(&blkg->blkcg->lock);
1099 1096
1100 if (!pol->cpd_size)
1101 goto no_cpd;
1102 if (!blkg->blkcg->pd[pol->plid]) {
1103 /* Per-policy per-blkcg data */
1104 blkg->blkcg->pd[pol->plid] = cpd;
1105 cpd->plid = pol->plid;
1106 pol->cpd_init_fn(blkg->blkcg);
1107 } else { /* must free it as it has already been extracted */
1108 kfree(cpd);
1109 }
1110no_cpd:
1111 blkg->pd[pol->plid] = pd; 1097 blkg->pd[pol->plid] = pd;
1112 pd->blkg = blkg; 1098 pd->blkg = blkg;
1113 pd->plid = pol->plid; 1099 pd->plid = pol->plid;
@@ -1124,8 +1110,6 @@ out_free:
1124 blk_queue_bypass_end(q); 1110 blk_queue_bypass_end(q);
1125 list_for_each_entry_safe(pd, nd, &pds, alloc_node) 1111 list_for_each_entry_safe(pd, nd, &pds, alloc_node)
1126 kfree(pd); 1112 kfree(pd);
1127 list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
1128 kfree(cpd);
1129 return ret; 1113 return ret;
1130} 1114}
1131EXPORT_SYMBOL_GPL(blkcg_activate_policy); 1115EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1162,8 +1146,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
1162 1146
1163 kfree(blkg->pd[pol->plid]); 1147 kfree(blkg->pd[pol->plid]);
1164 blkg->pd[pol->plid] = NULL; 1148 blkg->pd[pol->plid] = NULL;
1165 kfree(blkg->blkcg->pd[pol->plid]);
1166 blkg->blkcg->pd[pol->plid] = NULL;
1167 1149
1168 spin_unlock(&blkg->blkcg->lock); 1150 spin_unlock(&blkg->blkcg->lock);
1169 } 1151 }
@@ -1182,11 +1164,13 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1182 */ 1164 */
1183int blkcg_policy_register(struct blkcg_policy *pol) 1165int blkcg_policy_register(struct blkcg_policy *pol)
1184{ 1166{
1167 struct blkcg *blkcg;
1185 int i, ret; 1168 int i, ret;
1186 1169
1187 if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data))) 1170 if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
1188 return -EINVAL; 1171 return -EINVAL;
1189 1172
1173 mutex_lock(&blkcg_pol_register_mutex);
1190 mutex_lock(&blkcg_pol_mutex); 1174 mutex_lock(&blkcg_pol_mutex);
1191 1175
1192 /* find an empty slot */ 1176 /* find an empty slot */
@@ -1195,19 +1179,49 @@ int blkcg_policy_register(struct blkcg_policy *pol)
1195 if (!blkcg_policy[i]) 1179 if (!blkcg_policy[i])
1196 break; 1180 break;
1197 if (i >= BLKCG_MAX_POLS) 1181 if (i >= BLKCG_MAX_POLS)
1198 goto out_unlock; 1182 goto err_unlock;
1199 1183
1200 /* register and update blkgs */ 1184 /* register @pol */
1201 pol->plid = i; 1185 pol->plid = i;
1202 blkcg_policy[i] = pol; 1186 blkcg_policy[pol->plid] = pol;
1187
1188 /* allocate and install cpd's */
1189 if (pol->cpd_size) {
1190 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1191 struct blkcg_policy_data *cpd;
1192
1193 cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
1194 if (!cpd) {
1195 mutex_unlock(&blkcg_pol_mutex);
1196 goto err_free_cpds;
1197 }
1198
1199 blkcg->pd[pol->plid] = cpd;
1200 cpd->plid = pol->plid;
1201 pol->cpd_init_fn(blkcg);
1202 }
1203 }
1204
1205 mutex_unlock(&blkcg_pol_mutex);
1203 1206
1204 /* everything is in place, add intf files for the new policy */ 1207 /* everything is in place, add intf files for the new policy */
1205 if (pol->cftypes) 1208 if (pol->cftypes)
1206 WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys, 1209 WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
1207 pol->cftypes)); 1210 pol->cftypes));
1208 ret = 0; 1211 mutex_unlock(&blkcg_pol_register_mutex);
1209out_unlock: 1212 return 0;
1213
1214err_free_cpds:
1215 if (pol->cpd_size) {
1216 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1217 kfree(blkcg->pd[pol->plid]);
1218 blkcg->pd[pol->plid] = NULL;
1219 }
1220 }
1221 blkcg_policy[pol->plid] = NULL;
1222err_unlock:
1210 mutex_unlock(&blkcg_pol_mutex); 1223 mutex_unlock(&blkcg_pol_mutex);
1224 mutex_unlock(&blkcg_pol_register_mutex);
1211 return ret; 1225 return ret;
1212} 1226}
1213EXPORT_SYMBOL_GPL(blkcg_policy_register); 1227EXPORT_SYMBOL_GPL(blkcg_policy_register);
@@ -1220,7 +1234,9 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
1220 */ 1234 */
1221void blkcg_policy_unregister(struct blkcg_policy *pol) 1235void blkcg_policy_unregister(struct blkcg_policy *pol)
1222{ 1236{
1223 mutex_lock(&blkcg_pol_mutex); 1237 struct blkcg *blkcg;
1238
1239 mutex_lock(&blkcg_pol_register_mutex);
1224 1240
1225 if (WARN_ON(blkcg_policy[pol->plid] != pol)) 1241 if (WARN_ON(blkcg_policy[pol->plid] != pol))
1226 goto out_unlock; 1242 goto out_unlock;
@@ -1229,9 +1245,19 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
1229 if (pol->cftypes) 1245 if (pol->cftypes)
1230 cgroup_rm_cftypes(pol->cftypes); 1246 cgroup_rm_cftypes(pol->cftypes);
1231 1247
1232 /* unregister and update blkgs */ 1248 /* remove cpds and unregister */
1249 mutex_lock(&blkcg_pol_mutex);
1250
1251 if (pol->cpd_size) {
1252 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1253 kfree(blkcg->pd[pol->plid]);
1254 blkcg->pd[pol->plid] = NULL;
1255 }
1256 }
1233 blkcg_policy[pol->plid] = NULL; 1257 blkcg_policy[pol->plid] = NULL;
1234out_unlock: 1258
1235 mutex_unlock(&blkcg_pol_mutex); 1259 mutex_unlock(&blkcg_pol_mutex);
1260out_unlock:
1261 mutex_unlock(&blkcg_pol_register_mutex);
1236} 1262}
1237EXPORT_SYMBOL_GPL(blkcg_policy_unregister); 1263EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
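The blk-cgroup.c rework replaces the old trylock-and-restart_syscall() workaround with a fixed lock order: blkcg_pol_register_mutex is always taken outside blkcg_pol_mutex, and cgroup file registration runs while only the outer mutex is held, so cgroup callbacks can safely take the inner one. A minimal pthread sketch of that nesting (the names and the printf placeholders are mine):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;  /* outer */
static pthread_mutex_t pol_lock = PTHREAD_MUTEX_INITIALIZER;       /* inner */

static void register_policy(const char *name)
{
    pthread_mutex_lock(&register_lock);

    pthread_mutex_lock(&pol_lock);
    printf("policy table updated for %s\n", name);
    pthread_mutex_unlock(&pol_lock);

    /* interface-file creation happens here, outside the inner lock, so a
     * callback it triggers may grab pol_lock without a circular dependency */
    printf("interface files added for %s\n", name);

    pthread_mutex_unlock(&register_lock);
}

static void reset_stats(void)            /* models a cgroup-file callback */
{
    pthread_mutex_lock(&pol_lock);
    printf("stats reset\n");
    pthread_mutex_unlock(&pol_lock);
}

int main(void)
{
    register_policy("throttle");
    reset_stats();
    return 0;
}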
diff --git a/block/blk-core.c b/block/blk-core.c
index 82819e68f58b..627ed0c593fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
3370int __init blk_dev_init(void) 3370int __init blk_dev_init(void)
3371{ 3371{
3372 BUILD_BUG_ON(__REQ_NR_BITS > 8 * 3372 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3373 sizeof(((struct request *)0)->cmd_flags)); 3373 FIELD_SIZEOF(struct request, cmd_flags));
3374 3374
3375 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 3375 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3376 kblockd_workqueue = alloc_workqueue("kblockd", 3376 kblockd_workqueue = alloc_workqueue("kblockd",
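The blk-core.c change only swaps an open-coded sizeof on a cast null pointer for the FIELD_SIZEOF() helper, which expands to the same expression. A short demonstration (request_like is an invented stand-in for struct request):

#include <stdio.h>

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))   /* same trick the helper uses */

struct request_like {
    unsigned long long cmd_flags;
    int tag;
};

int main(void)
{
    printf("cmd_flags: %zu bytes, tag: %zu bytes\n",
           FIELD_SIZEOF(struct request_like, cmd_flags),
           FIELD_SIZEOF(struct request_like, tag));
    return 0;
}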
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f53779692c77..7d842db59699 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1998,7 +1998,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1998 goto err_hctxs; 1998 goto err_hctxs;
1999 1999
2000 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); 2000 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
2001 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000); 2001 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2002 2002
2003 q->nr_queues = nr_cpu_ids; 2003 q->nr_queues = nr_cpu_ids;
2004 q->nr_hw_queues = set->nr_hw_queues; 2004 q->nr_hw_queues = set->nr_hw_queues;
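The blk-mq.c default timeout is expressed in jiffies, so the literal 30000 only means 30 seconds when HZ happens to be 1000; 30 * HZ is correct for every configuration. A quick illustration of how far off the literal is at other HZ values (the HZ list is just for demonstration):

#include <stdio.h>

int main(void)
{
    const long hz_values[] = { 100, 250, 1000 };

    for (int i = 0; i < 3; i++) {
        long hz = hz_values[i];
        printf("HZ=%4ld: 30 * HZ = %6ld jiffies (30 s); literal 30000 jiffies = %g s\n",
               hz, 30 * hz, 30000.0 / hz);
    }
    return 0;
}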
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bfffca9..e0057d035200 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
241 * Description: 241 * Description:
242 * Enables a low level driver to set a hard upper limit, 242 * Enables a low level driver to set a hard upper limit,
243 * max_hw_sectors, on the size of requests. max_hw_sectors is set by 243 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
244 * the device driver based upon the combined capabilities of I/O 244 * the device driver based upon the capabilities of the I/O
245 * controller and storage device. 245 * controller.
246 * 246 *
247 * max_sectors is a soft limit imposed by the block layer for 247 * max_sectors is a soft limit imposed by the block layer for
248 * filesystem type requests. This value can be overridden on a 248 * filesystem type requests. This value can be overridden on a
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da6770bc9e..b8efe36ce114 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
393 struct scatterlist *cipher = areq_ctx->cipher; 393 struct scatterlist *cipher = areq_ctx->cipher;
394 struct scatterlist *hsg = areq_ctx->hsg; 394 struct scatterlist *hsg = areq_ctx->hsg;
395 struct scatterlist *tsg = areq_ctx->tsg; 395 struct scatterlist *tsg = areq_ctx->tsg;
396 struct scatterlist *assoc1;
397 struct scatterlist *assoc2;
398 unsigned int ivsize = crypto_aead_ivsize(authenc_esn); 396 unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
399 unsigned int cryptlen = req->cryptlen; 397 unsigned int cryptlen = req->cryptlen;
400 struct page *dstp; 398 struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
412 cryptlen += ivsize; 410 cryptlen += ivsize;
413 } 411 }
414 412
415 if (sg_is_last(assoc)) 413 if (assoc->length < 12)
416 return -EINVAL;
417
418 assoc1 = assoc + 1;
419 if (sg_is_last(assoc1))
420 return -EINVAL;
421
422 assoc2 = assoc + 2;
423 if (!sg_is_last(assoc2))
424 return -EINVAL; 414 return -EINVAL;
425 415
426 sg_init_table(hsg, 2); 416 sg_init_table(hsg, 2);
427 sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); 417 sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
428 sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); 418 sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
429 419
430 sg_init_table(tsg, 1); 420 sg_init_table(tsg, 1);
431 sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); 421 sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
432 422
433 areq_ctx->cryptlen = cryptlen; 423 areq_ctx->cryptlen = cryptlen;
434 areq_ctx->headlen = assoc->length + assoc2->length; 424 areq_ctx->headlen = 8;
435 areq_ctx->trailen = assoc1->length; 425 areq_ctx->trailen = 4;
436 areq_ctx->sg = dst; 426 areq_ctx->sg = dst;
437 427
438 areq_ctx->complete = authenc_esn_geniv_ahash_done; 428 areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
563 struct scatterlist *cipher = areq_ctx->cipher; 553 struct scatterlist *cipher = areq_ctx->cipher;
564 struct scatterlist *hsg = areq_ctx->hsg; 554 struct scatterlist *hsg = areq_ctx->hsg;
565 struct scatterlist *tsg = areq_ctx->tsg; 555 struct scatterlist *tsg = areq_ctx->tsg;
566 struct scatterlist *assoc1;
567 struct scatterlist *assoc2;
568 unsigned int ivsize = crypto_aead_ivsize(authenc_esn); 556 unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
569 struct page *srcp; 557 struct page *srcp;
570 u8 *vsrc; 558 u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
580 cryptlen += ivsize; 568 cryptlen += ivsize;
581 } 569 }
582 570
583 if (sg_is_last(assoc)) 571 if (assoc->length < 12)
584 return -EINVAL;
585
586 assoc1 = assoc + 1;
587 if (sg_is_last(assoc1))
588 return -EINVAL;
589
590 assoc2 = assoc + 2;
591 if (!sg_is_last(assoc2))
592 return -EINVAL; 572 return -EINVAL;
593 573
594 sg_init_table(hsg, 2); 574 sg_init_table(hsg, 2);
595 sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset); 575 sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
596 sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset); 576 sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
597 577
598 sg_init_table(tsg, 1); 578 sg_init_table(tsg, 1);
599 sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset); 579 sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
600 580
601 areq_ctx->cryptlen = cryptlen; 581 areq_ctx->cryptlen = cryptlen;
602 areq_ctx->headlen = assoc->length + assoc2->length; 582 areq_ctx->headlen = 8;
603 areq_ctx->trailen = assoc1->length; 583 areq_ctx->trailen = 4;
604 areq_ctx->sg = src; 584 areq_ctx->sg = src;
605 585
606 areq_ctx->complete = authenc_esn_verify_ahash_done; 586 areq_ctx->complete = authenc_esn_verify_ahash_done;
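Both authencesn.c hunks stop requiring the associated data to arrive as exactly three scatterlist entries and instead accept a single entry of at least 12 bytes, carving the hash head out of the 4-byte words at offsets 0 and 8 and the trailer out of the word at offset 4 (headlen 8, trailen 4). A plain-C sketch of that slicing; the word labels are my own, not the driver's:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* 12 bytes of associated data, seen as three 4-byte words */
    uint8_t assoc[12] = { 1,1,1,1,  2,2,2,2,  3,3,3,3 };

    const uint8_t *head_a  = assoc + 0;   /* 4 bytes at offset 0 -> hash head */
    const uint8_t *head_b  = assoc + 8;   /* 4 bytes at offset 8 -> hash head */
    const uint8_t *trailer = assoc + 4;   /* 4 bytes at offset 4 -> trailer  */

    printf("head words start with %d and %d, trailer with %d\n",
           head_a[0], head_b[0], trailer[0]);
    printf("headlen = 8, trailen = 4\n");
    return 0;
}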
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 569ee090343f..46b58abb08c5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
352 pdata->mmio_size = resource_size(rentry->res); 352 pdata->mmio_size = resource_size(rentry->res);
353 pdata->mmio_base = ioremap(rentry->res->start, 353 pdata->mmio_base = ioremap(rentry->res->start,
354 pdata->mmio_size); 354 pdata->mmio_size);
355 if (!pdata->mmio_base)
356 goto err_out;
357 break; 355 break;
358 } 356 }
359 357
360 acpi_dev_free_resource_list(&resource_list); 358 acpi_dev_free_resource_list(&resource_list);
361 359
360 if (!pdata->mmio_base) {
361 ret = -ENOMEM;
362 goto err_out;
363 }
364
362 pdata->dev_desc = dev_desc; 365 pdata->dev_desc = dev_desc;
363 366
364 if (dev_desc->setup) 367 if (dev_desc->setup)
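The acpi_lpss.c fix is about error-path ordering: the ioremap() result is now checked only after acpi_dev_free_resource_list() has run, so the temporary resource list is never leaked, and the failure explicitly sets ret = -ENOMEM. A small sketch of that "release the temporary first, then bail" shape (map_mmio() and the toy list stand in for the real calls):

#include <stdio.h>
#include <stdlib.h>

static void *map_mmio(int ok) { return ok ? malloc(16) : NULL; }   /* stand-in */

static int create_device(int mapping_ok)
{
    void *resource_list = malloc(32);        /* built while walking the resources */
    void *mmio = map_mmio(mapping_ok);

    free(resource_list);                     /* released on every path, as in the fix */

    if (!mmio)
        return -1;                           /* -ENOMEM in the driver */

    printf("device created\n");
    free(mmio);
    return 0;
}

int main(void)
{
    printf("ok path: %d, failed mapping: %d\n", create_device(1), create_device(0));
    return 0;
}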
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..88dbbb115285 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
231 dev_warn(&device->dev, "Failed to change power state to %s\n", 231 dev_warn(&device->dev, "Failed to change power state to %s\n",
232 acpi_power_state_string(state)); 232 acpi_power_state_string(state));
233 } else { 233 } else {
234 device->power.state = state; 234 device->power.state = target_state;
235 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 235 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
236 "Device [%s] transitioned to %s\n", 236 "Device [%s] transitioned to %s\n",
237 device->pnp.bus_id, 237 device->pnp.bus_id,
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 2161fa178c8d..628a42c41ab1 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -18,6 +18,7 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/acpi.h> 19#include <linux/acpi.h>
20#include <linux/sort.h> 20#include <linux/sort.h>
21#include <linux/pmem.h>
21#include <linux/io.h> 22#include <linux/io.h>
22#include "nfit.h" 23#include "nfit.h"
23 24
@@ -305,6 +306,23 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
305 return true; 306 return true;
306} 307}
307 308
309static bool add_flush(struct acpi_nfit_desc *acpi_desc,
310 struct acpi_nfit_flush_address *flush)
311{
312 struct device *dev = acpi_desc->dev;
313 struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
314 GFP_KERNEL);
315
316 if (!nfit_flush)
317 return false;
318 INIT_LIST_HEAD(&nfit_flush->list);
319 nfit_flush->flush = flush;
320 list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
321 dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
322 flush->device_handle, flush->hint_count);
323 return true;
324}
325
308static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table, 326static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
309 const void *end) 327 const void *end)
310{ 328{
@@ -338,7 +356,8 @@ static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
338 return err; 356 return err;
339 break; 357 break;
340 case ACPI_NFIT_TYPE_FLUSH_ADDRESS: 358 case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
341 dev_dbg(dev, "%s: flush\n", __func__); 359 if (!add_flush(acpi_desc, table))
360 return err;
342 break; 361 break;
343 case ACPI_NFIT_TYPE_SMBIOS: 362 case ACPI_NFIT_TYPE_SMBIOS:
344 dev_dbg(dev, "%s: smbios\n", __func__); 363 dev_dbg(dev, "%s: smbios\n", __func__);
@@ -389,6 +408,7 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
389{ 408{
390 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 409 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
391 struct nfit_memdev *nfit_memdev; 410 struct nfit_memdev *nfit_memdev;
411 struct nfit_flush *nfit_flush;
392 struct nfit_dcr *nfit_dcr; 412 struct nfit_dcr *nfit_dcr;
393 struct nfit_bdw *nfit_bdw; 413 struct nfit_bdw *nfit_bdw;
394 struct nfit_idt *nfit_idt; 414 struct nfit_idt *nfit_idt;
@@ -442,6 +462,14 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
442 nfit_mem->idt_bdw = nfit_idt->idt; 462 nfit_mem->idt_bdw = nfit_idt->idt;
443 break; 463 break;
444 } 464 }
465
466 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
467 if (nfit_flush->flush->device_handle !=
468 nfit_memdev->memdev->device_handle)
469 continue;
470 nfit_mem->nfit_flush = nfit_flush;
471 break;
472 }
445 break; 473 break;
446 } 474 }
447 475
@@ -978,6 +1006,24 @@ static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
978 return mmio->base_offset + line_offset + table_offset + sub_line_offset; 1006 return mmio->base_offset + line_offset + table_offset + sub_line_offset;
979} 1007}
980 1008
1009static void wmb_blk(struct nfit_blk *nfit_blk)
1010{
1011
1012 if (nfit_blk->nvdimm_flush) {
1013 /*
1014 * The first wmb() is needed to 'sfence' all previous writes
1015 * such that they are architecturally visible for the platform
1016 * buffer flush. Note that we've already arranged for pmem
1017 * writes to avoid the cache via arch_memcpy_to_pmem(). The
1018 * final wmb() ensures ordering for the NVDIMM flush write.
1019 */
1020 wmb();
1021 writeq(1, nfit_blk->nvdimm_flush);
1022 wmb();
1023 } else
1024 wmb_pmem();
1025}
1026
981static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) 1027static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
982{ 1028{
983 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; 1029 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
@@ -1012,7 +1058,10 @@ static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
1012 offset = to_interleave_offset(offset, mmio); 1058 offset = to_interleave_offset(offset, mmio);
1013 1059
1014 writeq(cmd, mmio->base + offset); 1060 writeq(cmd, mmio->base + offset);
1015 /* FIXME: conditionally perform read-back if mandated by firmware */ 1061 wmb_blk(nfit_blk);
1062
1063 if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
1064 readq(mmio->base + offset);
1016} 1065}
1017 1066
1018static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, 1067static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
@@ -1026,7 +1075,6 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1026 1075
1027 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES 1076 base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
1028 + lane * mmio->size; 1077 + lane * mmio->size;
1029 /* TODO: non-temporal access, flush hints, cache management etc... */
1030 write_blk_ctl(nfit_blk, lane, dpa, len, rw); 1078 write_blk_ctl(nfit_blk, lane, dpa, len, rw);
1031 while (len) { 1079 while (len) {
1032 unsigned int c; 1080 unsigned int c;
@@ -1045,13 +1093,19 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
1045 } 1093 }
1046 1094
1047 if (rw) 1095 if (rw)
1048 memcpy(mmio->aperture + offset, iobuf + copied, c); 1096 memcpy_to_pmem(mmio->aperture + offset,
1097 iobuf + copied, c);
1049 else 1098 else
1050 memcpy(iobuf + copied, mmio->aperture + offset, c); 1099 memcpy_from_pmem(iobuf + copied,
1100 mmio->aperture + offset, c);
1051 1101
1052 copied += c; 1102 copied += c;
1053 len -= c; 1103 len -= c;
1054 } 1104 }
1105
1106 if (rw)
1107 wmb_blk(nfit_blk);
1108
1055 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0; 1109 rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
1056 return rc; 1110 return rc;
1057} 1111}
@@ -1124,7 +1178,7 @@ static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
1124} 1178}
1125 1179
1126static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc, 1180static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1127 struct acpi_nfit_system_address *spa) 1181 struct acpi_nfit_system_address *spa, enum spa_map_type type)
1128{ 1182{
1129 resource_size_t start = spa->address; 1183 resource_size_t start = spa->address;
1130 resource_size_t n = spa->length; 1184 resource_size_t n = spa->length;
@@ -1152,8 +1206,15 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1152 if (!res) 1206 if (!res)
1153 goto err_mem; 1207 goto err_mem;
1154 1208
1155 /* TODO: cacheability based on the spa type */ 1209 if (type == SPA_MAP_APERTURE) {
1156 spa_map->iomem = ioremap_nocache(start, n); 1210 /*
1211 * TODO: memremap_pmem() support, but that requires cache
1212 * flushing when the aperture is moved.
1213 */
1214 spa_map->iomem = ioremap_wc(start, n);
1215 } else
1216 spa_map->iomem = ioremap_nocache(start, n);
1217
1157 if (!spa_map->iomem) 1218 if (!spa_map->iomem)
1158 goto err_map; 1219 goto err_map;
1159 1220
@@ -1171,6 +1232,7 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1171 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges 1232 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
1172 * @nvdimm_bus: NFIT-bus that provided the spa table entry 1233 * @nvdimm_bus: NFIT-bus that provided the spa table entry
1173 * @nfit_spa: spa table to map 1234 * @nfit_spa: spa table to map
1235 * @type: aperture or control region
1174 * 1236 *
1175 * In the case where block-data-window apertures and 1237 * In the case where block-data-window apertures and
1176 * dimm-control-regions are interleaved they will end up sharing a 1238 * dimm-control-regions are interleaved they will end up sharing a
@@ -1180,12 +1242,12 @@ static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1180 * unbound. 1242 * unbound.
1181 */ 1243 */
1182static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc, 1244static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1183 struct acpi_nfit_system_address *spa) 1245 struct acpi_nfit_system_address *spa, enum spa_map_type type)
1184{ 1246{
1185 void __iomem *iomem; 1247 void __iomem *iomem;
1186 1248
1187 mutex_lock(&acpi_desc->spa_map_mutex); 1249 mutex_lock(&acpi_desc->spa_map_mutex);
1188 iomem = __nfit_spa_map(acpi_desc, spa); 1250 iomem = __nfit_spa_map(acpi_desc, spa, type);
1189 mutex_unlock(&acpi_desc->spa_map_mutex); 1251 mutex_unlock(&acpi_desc->spa_map_mutex);
1190 1252
1191 return iomem; 1253 return iomem;
@@ -1206,12 +1268,35 @@ static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1206 return 0; 1268 return 0;
1207} 1269}
1208 1270
1271static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
1272 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
1273{
1274 struct nd_cmd_dimm_flags flags;
1275 int rc;
1276
1277 memset(&flags, 0, sizeof(flags));
1278 rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
1279 sizeof(flags));
1280
1281 if (rc >= 0 && flags.status == 0)
1282 nfit_blk->dimm_flags = flags.flags;
1283 else if (rc == -ENOTTY) {
1284 /* fall back to a conservative default */
1285 nfit_blk->dimm_flags = ND_BLK_DCR_LATCH;
1286 rc = 0;
1287 } else
1288 rc = -ENXIO;
1289
1290 return rc;
1291}
1292
1209static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, 1293static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1210 struct device *dev) 1294 struct device *dev)
1211{ 1295{
1212 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); 1296 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1213 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 1297 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1214 struct nd_blk_region *ndbr = to_nd_blk_region(dev); 1298 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1299 struct nfit_flush *nfit_flush;
1215 struct nfit_blk_mmio *mmio; 1300 struct nfit_blk_mmio *mmio;
1216 struct nfit_blk *nfit_blk; 1301 struct nfit_blk *nfit_blk;
1217 struct nfit_mem *nfit_mem; 1302 struct nfit_mem *nfit_mem;
@@ -1223,8 +1308,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1223 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { 1308 if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
1224 dev_dbg(dev, "%s: missing%s%s%s\n", __func__, 1309 dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
1225 nfit_mem ? "" : " nfit_mem", 1310 nfit_mem ? "" : " nfit_mem",
1226 nfit_mem->dcr ? "" : " dcr", 1311 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
1227 nfit_mem->bdw ? "" : " bdw"); 1312 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
1228 return -ENXIO; 1313 return -ENXIO;
1229 } 1314 }
1230 1315
@@ -1237,7 +1322,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1237 /* map block aperture memory */ 1322 /* map block aperture memory */
1238 nfit_blk->bdw_offset = nfit_mem->bdw->offset; 1323 nfit_blk->bdw_offset = nfit_mem->bdw->offset;
1239 mmio = &nfit_blk->mmio[BDW]; 1324 mmio = &nfit_blk->mmio[BDW];
1240 mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw); 1325 mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
1326 SPA_MAP_APERTURE);
1241 if (!mmio->base) { 1327 if (!mmio->base) {
1242 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, 1328 dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
1243 nvdimm_name(nvdimm)); 1329 nvdimm_name(nvdimm));
@@ -1259,7 +1345,8 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1259 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; 1345 nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
1260 nfit_blk->stat_offset = nfit_mem->dcr->status_offset; 1346 nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
1261 mmio = &nfit_blk->mmio[DCR]; 1347 mmio = &nfit_blk->mmio[DCR];
1262 mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr); 1348 mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
1349 SPA_MAP_CONTROL);
1263 if (!mmio->base) { 1350 if (!mmio->base) {
1264 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, 1351 dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
1265 nvdimm_name(nvdimm)); 1352 nvdimm_name(nvdimm));
@@ -1277,6 +1364,24 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1277 return rc; 1364 return rc;
1278 } 1365 }
1279 1366
1367 rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
1368 if (rc < 0) {
1369 dev_dbg(dev, "%s: %s failed get DIMM flags\n",
1370 __func__, nvdimm_name(nvdimm));
1371 return rc;
1372 }
1373
1374 nfit_flush = nfit_mem->nfit_flush;
1375 if (nfit_flush && nfit_flush->flush->hint_count != 0) {
1376 nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
1377 nfit_flush->flush->hint_address[0], 8);
1378 if (!nfit_blk->nvdimm_flush)
1379 return -ENOMEM;
1380 }
1381
1382 if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
1383 dev_warn(dev, "unable to guarantee persistence of writes\n");
1384
1280 if (mmio->line_size == 0) 1385 if (mmio->line_size == 0)
1281 return 0; 1386 return 0;
1282 1387
@@ -1459,6 +1564,7 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
1459 INIT_LIST_HEAD(&acpi_desc->dcrs); 1564 INIT_LIST_HEAD(&acpi_desc->dcrs);
1460 INIT_LIST_HEAD(&acpi_desc->bdws); 1565 INIT_LIST_HEAD(&acpi_desc->bdws);
1461 INIT_LIST_HEAD(&acpi_desc->idts); 1566 INIT_LIST_HEAD(&acpi_desc->idts);
1567 INIT_LIST_HEAD(&acpi_desc->flushes);
1462 INIT_LIST_HEAD(&acpi_desc->memdevs); 1568 INIT_LIST_HEAD(&acpi_desc->memdevs);
1463 INIT_LIST_HEAD(&acpi_desc->dimms); 1569 INIT_LIST_HEAD(&acpi_desc->dimms);
1464 mutex_init(&acpi_desc->spa_map_mutex); 1570 mutex_init(&acpi_desc->spa_map_mutex);
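Among the nfit.c additions, acpi_nfit_blk_get_flags() queries the DIMM for ND_CMD_DIMM_FLAGS and, when the command is simply not implemented (-ENOTTY), falls back to the conservative ND_BLK_DCR_LATCH behaviour instead of failing; only a real error becomes -ENXIO. A sketch of that query-or-conservative-default pattern (the flag value and the helper are illustrative):

#include <stdio.h>
#include <errno.h>

#define BLK_DCR_LATCH 0x2     /* mirrors ND_BLK_DCR_LATCH for illustration */

/* stand-in for the DSM call; returns -ENOTTY when the DIMM lacks the command */
static int query_dimm_flags(int supported, unsigned int *flags)
{
    if (!supported)
        return -ENOTTY;
    *flags = 0;               /* firmware reports that no latch is required */
    return 0;
}

static unsigned int get_dimm_flags(int supported)
{
    unsigned int flags = 0;
    int rc = query_dimm_flags(supported, &flags);

    if (rc == -ENOTTY)
        flags = BLK_DCR_LATCH;   /* conservative default, as in the diff */
    return flags;
}

int main(void)
{
    printf("command supported: %#x, unsupported: %#x\n",
           get_dimm_flags(1), get_dimm_flags(0));
    return 0;
}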
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index 81f2e8c5a79c..79b6d83875c1 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -40,6 +40,10 @@ enum nfit_uuids {
40 NFIT_UUID_MAX, 40 NFIT_UUID_MAX,
41}; 41};
42 42
43enum {
44 ND_BLK_DCR_LATCH = 2,
45};
46
43struct nfit_spa { 47struct nfit_spa {
44 struct acpi_nfit_system_address *spa; 48 struct acpi_nfit_system_address *spa;
45 struct list_head list; 49 struct list_head list;
@@ -60,6 +64,11 @@ struct nfit_idt {
60 struct list_head list; 64 struct list_head list;
61}; 65};
62 66
67struct nfit_flush {
68 struct acpi_nfit_flush_address *flush;
69 struct list_head list;
70};
71
63struct nfit_memdev { 72struct nfit_memdev {
64 struct acpi_nfit_memory_map *memdev; 73 struct acpi_nfit_memory_map *memdev;
65 struct list_head list; 74 struct list_head list;
@@ -77,6 +86,7 @@ struct nfit_mem {
77 struct acpi_nfit_system_address *spa_bdw; 86 struct acpi_nfit_system_address *spa_bdw;
78 struct acpi_nfit_interleave *idt_dcr; 87 struct acpi_nfit_interleave *idt_dcr;
79 struct acpi_nfit_interleave *idt_bdw; 88 struct acpi_nfit_interleave *idt_bdw;
89 struct nfit_flush *nfit_flush;
80 struct list_head list; 90 struct list_head list;
81 struct acpi_device *adev; 91 struct acpi_device *adev;
82 unsigned long dsm_mask; 92 unsigned long dsm_mask;
@@ -88,6 +98,7 @@ struct acpi_nfit_desc {
88 struct mutex spa_map_mutex; 98 struct mutex spa_map_mutex;
89 struct list_head spa_maps; 99 struct list_head spa_maps;
90 struct list_head memdevs; 100 struct list_head memdevs;
101 struct list_head flushes;
91 struct list_head dimms; 102 struct list_head dimms;
92 struct list_head spas; 103 struct list_head spas;
93 struct list_head dcrs; 104 struct list_head dcrs;
@@ -109,7 +120,7 @@ struct nfit_blk {
109 struct nfit_blk_mmio { 120 struct nfit_blk_mmio {
110 union { 121 union {
111 void __iomem *base; 122 void __iomem *base;
112 void *aperture; 123 void __pmem *aperture;
113 }; 124 };
114 u64 size; 125 u64 size;
115 u64 base_offset; 126 u64 base_offset;
@@ -123,6 +134,13 @@ struct nfit_blk {
123 u64 bdw_offset; /* post interleave offset */ 134 u64 bdw_offset; /* post interleave offset */
124 u64 stat_offset; 135 u64 stat_offset;
125 u64 cmd_offset; 136 u64 cmd_offset;
137 void __iomem *nvdimm_flush;
138 u32 dimm_flags;
139};
140
141enum spa_map_type {
142 SPA_MAP_CONTROL,
143 SPA_MAP_APERTURE,
126}; 144};
127 145
128struct nfit_spa_mapping { 146struct nfit_spa_mapping {
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c262e4acd68d..3b8963f21b36 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
175 if (!addr || !length) 175 if (!addr || !length)
176 return; 176 return;
177 177
178 acpi_reserve_region(addr, length, gas->space_id, 0, desc); 178 /* Resources are never freed */
179 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
180 request_region(addr, length, desc);
181 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
182 request_mem_region(addr, length, desc);
179} 183}
180 184
181static void __init acpi_reserve_resources(void) 185static int __init acpi_reserve_resources(void)
182{ 186{
183 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 187 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
184 "ACPI PM1a_EVT_BLK"); 188 "ACPI PM1a_EVT_BLK");
@@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
207 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 211 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
208 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 212 acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
209 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 213 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
214
215 return 0;
210} 216}
217fs_initcall_sync(acpi_reserve_resources);
211 218
212void acpi_os_printf(const char *fmt, ...) 219void acpi_os_printf(const char *fmt, ...)
213{ 220{
@@ -1862,7 +1869,6 @@ acpi_status __init acpi_os_initialize(void)
1862 1869
1863acpi_status __init acpi_os_initialize1(void) 1870acpi_status __init acpi_os_initialize1(void)
1864{ 1871{
1865 acpi_reserve_resources();
1866 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1872 kacpid_wq = alloc_workqueue("kacpid", 0, 1);
1867 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1873 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
1868 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); 1874 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 10561ce16ed1..f1c966e05078 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/export.h> 27#include <linux/export.h>
28#include <linux/ioport.h> 28#include <linux/ioport.h>
29#include <linux/list.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
31 30
32#ifdef CONFIG_X86 31#ifdef CONFIG_X86
@@ -194,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
194 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16; 193 u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
195 bool wp = addr->info.mem.write_protect; 194 bool wp = addr->info.mem.write_protect;
196 u64 len = attr->address_length; 195 u64 len = attr->address_length;
196 u64 start, end, offset = 0;
197 struct resource *res = &win->res; 197 struct resource *res = &win->res;
198 198
199 /* 199 /*
@@ -205,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
205 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n", 205 pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
206 addr->min_address_fixed, addr->max_address_fixed, len); 206 addr->min_address_fixed, addr->max_address_fixed, len);
207 207
208 res->start = attr->minimum;
209 res->end = attr->maximum;
210
211 /* 208 /*
212 * For bridges that translate addresses across the bridge, 209 * For bridges that translate addresses across the bridge,
213 * translation_offset is the offset that must be added to the 210 * translation_offset is the offset that must be added to the
@@ -215,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
215 * primary side. Non-bridge devices must list 0 for all Address 212 * primary side. Non-bridge devices must list 0 for all Address
216 * Translation offset bits. 213 * Translation offset bits.
217 */ 214 */
218 if (addr->producer_consumer == ACPI_PRODUCER) { 215 if (addr->producer_consumer == ACPI_PRODUCER)
219 res->start += attr->translation_offset; 216 offset = attr->translation_offset;
220 res->end += attr->translation_offset; 217 else if (attr->translation_offset)
221 } else if (attr->translation_offset) {
222 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n", 218 pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
223 attr->translation_offset); 219 attr->translation_offset);
220 start = attr->minimum + offset;
221 end = attr->maximum + offset;
222
223 win->offset = offset;
224 res->start = start;
225 res->end = end;
226 if (sizeof(resource_size_t) < sizeof(u64) &&
227 (offset != win->offset || start != res->start || end != res->end)) {
228 pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
229 attr->minimum, attr->maximum);
230 return false;
224 } 231 }
225 232
226 switch (addr->resource_type) { 233 switch (addr->resource_type) {
@@ -237,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
237 return false; 244 return false;
238 } 245 }
239 246
240 win->offset = attr->translation_offset;
241
242 if (addr->producer_consumer == ACPI_PRODUCER) 247 if (addr->producer_consumer == ACPI_PRODUCER)
243 res->flags |= IORESOURCE_WINDOW; 248 res->flags |= IORESOURCE_WINDOW;
244 249
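The acpi_decode_space() rewrite computes start, end and the translation offset in u64 first and only then stores them in struct resource; with a 32-bit resource_size_t the stored values can silently truncate, and the new check rejects the window when they no longer round-trip. A standalone demonstration of the same round-trip test, pretending resource_size_t is 32 bits:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t resource_size_t;      /* pretend we are on a 32-bit build */

int main(void)
{
    uint64_t start = 0x1ffff0000ULL, end = 0x1ffffffffULL;   /* above 4 GiB */

    resource_size_t res_start = (resource_size_t)start;      /* truncates */
    resource_size_t res_end   = (resource_size_t)end;

    if (sizeof(resource_size_t) < sizeof(uint64_t) &&
        (res_start != start || res_end != end)) {
        printf("window [%#llx-%#llx] ignored, not CPU addressable\n",
               (unsigned long long)start, (unsigned long long)end);
        return 1;
    }
    printf("window accepted\n");
    return 0;
}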
@@ -622,164 +627,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
622 return (type & types) ? 0 : 1; 627 return (type & types) ? 0 : 1;
623} 628}
624EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type); 629EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
625
626struct reserved_region {
627 struct list_head node;
628 u64 start;
629 u64 end;
630};
631
632static LIST_HEAD(reserved_io_regions);
633static LIST_HEAD(reserved_mem_regions);
634
635static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
636 char *desc)
637{
638 unsigned int length = end - start + 1;
639 struct resource *res;
640
641 res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
642 request_region(start, length, desc) :
643 request_mem_region(start, length, desc);
644 if (!res)
645 return -EIO;
646
647 res->flags &= ~flags;
648 return 0;
649}
650
651static int add_region_before(u64 start, u64 end, u8 space_id,
652 unsigned long flags, char *desc,
653 struct list_head *head)
654{
655 struct reserved_region *reg;
656 int error;
657
658 reg = kmalloc(sizeof(*reg), GFP_KERNEL);
659 if (!reg)
660 return -ENOMEM;
661
662 error = request_range(start, end, space_id, flags, desc);
663 if (error) {
664 kfree(reg);
665 return error;
666 }
667
668 reg->start = start;
669 reg->end = end;
670 list_add_tail(&reg->node, head);
671 return 0;
672}
673
674/**
675 * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
676 * @start: Starting address of the region.
677 * @length: Length of the region.
678 * @space_id: Identifier of address space to reserve the region from.
679 * @flags: Resource flags to clear for the region after requesting it.
680 * @desc: Region description (for messages).
681 *
682 * Reserve an I/O or memory region as a system resource to prevent others from
683 * using it. If the new region overlaps with one of the regions (in the given
684 * address space) already reserved by this routine, only the non-overlapping
685 * parts of it will be reserved.
686 *
687 * Returned is either 0 (success) or a negative error code indicating a resource
688 * reservation problem. It is the code of the first encountered error, but the
689 * routine doesn't abort until it has attempted to request all of the parts of
690 * the new region that don't overlap with other regions reserved previously.
691 *
692 * The resources requested by this routine are never released.
693 */
694int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
695 unsigned long flags, char *desc)
696{
697 struct list_head *regions;
698 struct reserved_region *reg;
699 u64 end = start + length - 1;
700 int ret = 0, error = 0;
701
702 if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
703 regions = &reserved_io_regions;
704 else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
705 regions = &reserved_mem_regions;
706 else
707 return -EINVAL;
708
709 if (list_empty(regions))
710 return add_region_before(start, end, space_id, flags, desc, regions);
711
712 list_for_each_entry(reg, regions, node)
713 if (reg->start == end + 1) {
714 /* The new region can be prepended to this one. */
715 ret = request_range(start, end, space_id, flags, desc);
716 if (!ret)
717 reg->start = start;
718
719 return ret;
720 } else if (reg->start > end) {
721 /* No overlap. Add the new region here and get out. */
722 return add_region_before(start, end, space_id, flags,
723 desc, &reg->node);
724 } else if (reg->end == start - 1) {
725 goto combine;
726 } else if (reg->end >= start) {
727 goto overlap;
728 }
729
730 /* The new region goes after the last existing one. */
731 return add_region_before(start, end, space_id, flags, desc, regions);
732
733 overlap:
734 /*
735 * The new region overlaps an existing one.
736 *
737 * The head part of the new region immediately preceding the existing
738 * overlapping one can be combined with it right away.
739 */
740 if (reg->start > start) {
741 error = request_range(start, reg->start - 1, space_id, flags, desc);
742 if (error)
743 ret = error;
744 else
745 reg->start = start;
746 }
747
748 combine:
749 /*
750 * The new region is adjacent to an existing one. If it extends beyond
751 * that region all the way to the next one, it is possible to combine
752 * all three of them.
753 */
754 while (reg->end < end) {
755 struct reserved_region *next = NULL;
756 u64 a = reg->end + 1, b = end;
757
758 if (!list_is_last(&reg->node, regions)) {
759 next = list_next_entry(reg, node);
760 if (next->start <= end)
761 b = next->start - 1;
762 }
763 error = request_range(a, b, space_id, flags, desc);
764 if (!error) {
765 if (next && next->start == b + 1) {
766 reg->end = next->end;
767 list_del(&next->node);
768 kfree(next);
769 } else {
770 reg->end = end;
771 break;
772 }
773 } else if (next) {
774 if (!ret)
775 ret = error;
776
777 reg = next;
778 } else {
779 break;
780 }
781 }
782
783 return ret ? ret : error;
784}
785EXPORT_SYMBOL_GPL(acpi_reserve_region);
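The acpi_decode_space() hunks above compute start, end and offset as 64-bit values and only then copy them into the resource fields; if resource_size_t is narrower than u64, the round-trip comparison catches the truncation and the whole window is rejected instead of being silently clipped. A stand-alone userspace sketch of that check follows; fake_resource_size_t and the sample addresses are illustrative, not kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a 32-bit resource_size_t; not a kernel type. */
typedef uint32_t fake_resource_size_t;

int main(void)
{
        uint64_t start = 0x100000000ULL, end = 0x1ffffffffULL, offset = 0;
        fake_resource_size_t res_start = start, res_end = end, win_offset = offset;

        /* Assigning and comparing back detects silent truncation on 32-bit. */
        if (win_offset != offset || res_start != start || res_end != end)
                printf("window [%#llx-%#llx] ignored, not CPU addressable\n",
                       (unsigned long long)start, (unsigned long long)end);
        return 0;
}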
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 2649a068671d..ec256352f423 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1019,6 +1019,29 @@ static bool acpi_of_match_device(struct acpi_device *adev,
1019 return false; 1019 return false;
1020} 1020}
1021 1021
1022static bool __acpi_match_device_cls(const struct acpi_device_id *id,
1023 struct acpi_hardware_id *hwid)
1024{
1025 int i, msk, byte_shift;
1026 char buf[3];
1027
1028 if (!id->cls)
1029 return false;
1030
1031 /* Apply class-code bitmask, before checking each class-code byte */
1032 for (i = 1; i <= 3; i++) {
1033 byte_shift = 8 * (3 - i);
1034 msk = (id->cls_msk >> byte_shift) & 0xFF;
1035 if (!msk)
1036 continue;
1037
1038 sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
1039 if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
1040 return false;
1041 }
1042 return true;
1043}
1044
1022static const struct acpi_device_id *__acpi_match_device( 1045static const struct acpi_device_id *__acpi_match_device(
1023 struct acpi_device *device, 1046 struct acpi_device *device,
1024 const struct acpi_device_id *ids, 1047 const struct acpi_device_id *ids,
@@ -1036,9 +1059,12 @@ static const struct acpi_device_id *__acpi_match_device(
1036 1059
1037 list_for_each_entry(hwid, &device->pnp.ids, list) { 1060 list_for_each_entry(hwid, &device->pnp.ids, list) {
1038 /* First, check the ACPI/PNP IDs provided by the caller. */ 1061 /* First, check the ACPI/PNP IDs provided by the caller. */
1039 for (id = ids; id->id[0]; id++) 1062 for (id = ids; id->id[0] || id->cls; id++) {
1040 if (!strcmp((char *) id->id, hwid->id)) 1063 if (id->id[0] && !strcmp((char *) id->id, hwid->id))
1041 return id; 1064 return id;
1065 else if (id->cls && __acpi_match_device_cls(id, hwid))
1066 return id;
1067 }
1042 1068
1043 /* 1069 /*
1044 * Next, check ACPI_DT_NAMESPACE_HID and try to match the 1070 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
@@ -2101,6 +2127,8 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
2101 if (info->valid & ACPI_VALID_UID) 2127 if (info->valid & ACPI_VALID_UID)
2102 pnp->unique_id = kstrdup(info->unique_id.string, 2128 pnp->unique_id = kstrdup(info->unique_id.string,
2103 GFP_KERNEL); 2129 GFP_KERNEL);
2130 if (info->valid & ACPI_VALID_CLS)
2131 acpi_add_id(pnp, info->class_code.string);
2104 2132
2105 kfree(info); 2133 kfree(info);
2106 2134
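The __acpi_match_device_cls() helper added above compares a 24-bit PCI-style class code against the _CLS-derived ID string two hex digits at a time, honouring a per-byte mask so callers can wildcard individual bytes. A userspace sketch of the same comparison; cls_match() and the sample IDs are local to this example, not kernel symbols.

#include <stdio.h>
#include <string.h>

static int cls_match(unsigned int cls, unsigned int cls_msk, const char *hwid)
{
        char buf[3];
        int i;

        /* Apply the class-code bitmask one byte at a time, high byte first. */
        for (i = 1; i <= 3; i++) {
                int shift = 8 * (3 - i);
                int msk = (cls_msk >> shift) & 0xFF;

                if (!msk)
                        continue;
                snprintf(buf, sizeof(buf), "%02x", (cls >> shift) & msk);
                if (strncmp(buf, &hwid[(i - 1) * 2], 2))
                        return 0;
        }
        return 1;
}

int main(void)
{
        /* 0x010601 is the PCI class code of an AHCI SATA controller. */
        printf("%d\n", cls_match(0x010601, 0xffffff, "010601"));   /* 1 */
        printf("%d\n", cls_match(0x010601, 0xffffff, "010185"));   /* 0 */
        return 0;
}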
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/types.h> 34#include <linux/types.h>
35#include <linux/workqueue.h>
35#include <acpi/video.h> 36#include <acpi/video.h>
36 37
37ACPI_MODULE_NAME("video"); 38ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
41 42
42static bool backlight_notifier_registered; 43static bool backlight_notifier_registered;
43static struct notifier_block backlight_nb; 44static struct notifier_block backlight_nb;
45static struct work_struct backlight_notify_work;
44 46
45static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef; 47static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
46static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef; 48static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
262 { }, 264 { },
263}; 265};
264 266
267/* This uses a workqueue to avoid various locking ordering issues */
268static void acpi_video_backlight_notify_work(struct work_struct *work)
269{
270 if (acpi_video_get_backlight_type() != acpi_backlight_video)
271 acpi_video_unregister_backlight();
272}
273
265static int acpi_video_backlight_notify(struct notifier_block *nb, 274static int acpi_video_backlight_notify(struct notifier_block *nb,
266 unsigned long val, void *bd) 275 unsigned long val, void *bd)
267{ 276{
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
269 278
270 /* A raw bl registering may change video -> native */ 279 /* A raw bl registering may change video -> native */
271 if (backlight->props.type == BACKLIGHT_RAW && 280 if (backlight->props.type == BACKLIGHT_RAW &&
272 val == BACKLIGHT_REGISTERED && 281 val == BACKLIGHT_REGISTERED)
273 acpi_video_get_backlight_type() != acpi_backlight_video) 282 schedule_work(&backlight_notify_work);
274 acpi_video_unregister_backlight();
275 283
276 return NOTIFY_OK; 284 return NOTIFY_OK;
277} 285}
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
304 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 312 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
305 ACPI_UINT32_MAX, find_video, NULL, 313 ACPI_UINT32_MAX, find_video, NULL,
306 &video_caps, NULL); 314 &video_caps, NULL);
315 INIT_WORK(&backlight_notify_work,
316 acpi_video_backlight_notify_work);
307 backlight_nb.notifier_call = acpi_video_backlight_notify; 317 backlight_nb.notifier_call = acpi_video_backlight_notify;
308 backlight_nb.priority = 0; 318 backlight_nb.priority = 0;
309 if (backlight_register_notifier(&backlight_nb) == 0) 319 if (backlight_register_notifier(&backlight_nb) == 0)
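The video_detect change moves the unregister call out of the backlight notifier into a work item so it runs in process context, sidestepping the lock-ordering problems mentioned in the new comment. A minimal module-style sketch of that defer-to-workqueue pattern, assuming a kernel build environment; demo_work and demo_work_fn are names made up for this example.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct demo_work;

/* Runs later in process context, outside the notifier's locking context. */
static void demo_work_fn(struct work_struct *work)
{
        pr_info("deferred work running in process context\n");
}

static int __init demo_init(void)
{
        INIT_WORK(&demo_work, demo_work_fn);
        schedule_work(&demo_work);      /* what the notifier callback would do */
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");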
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 6d17a3b65ef7..15e40ee62a94 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -48,7 +48,7 @@ config ATA_VERBOSE_ERROR
48 48
49config ATA_ACPI 49config ATA_ACPI
50 bool "ATA ACPI Support" 50 bool "ATA ACPI Support"
51 depends on ACPI && PCI 51 depends on ACPI
52 default y 52 default y
53 help 53 help
54 This option adds support for ATA-related ACPI objects. 54 This option adds support for ATA-related ACPI objects.
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a885981..14b7305d2ba0 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
92 * Other architectures (e.g., ARM) either do not support big endian, or 92 * Other architectures (e.g., ARM) either do not support big endian, or
93 * else leave I/O in little endian mode. 93 * else leave I/O in little endian mode.
94 */ 94 */
95 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN)) 95 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
96 return __raw_readl(addr); 96 return __raw_readl(addr);
97 else 97 else
98 return readl_relaxed(addr); 98 return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
101static inline void brcm_sata_writereg(u32 val, void __iomem *addr) 101static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
102{ 102{
103 /* See brcm_sata_readreg() comments */ 103 /* See brcm_sata_readreg() comments */
104 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN)) 104 if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
105 __raw_writel(val, addr); 105 __raw_writel(val, addr);
106 else 106 else
107 writel_relaxed(val, addr); 107 writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
209 priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL); 209 priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
210} 210}
211 211
212#ifdef CONFIG_PM_SLEEP
212static int brcm_ahci_suspend(struct device *dev) 213static int brcm_ahci_suspend(struct device *dev)
213{ 214{
214 struct ata_host *host = dev_get_drvdata(dev); 215 struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
231 brcm_sata_phys_enable(priv); 232 brcm_sata_phys_enable(priv);
232 return ahci_platform_resume(dev); 233 return ahci_platform_resume(dev);
233} 234}
235#endif
234 236
235static struct scsi_host_template ahci_platform_sht = { 237static struct scsi_host_template ahci_platform_sht = {
236 AHCI_SHT(DRV_NAME), 238 AHCI_SHT(DRV_NAME),
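The ahci_brcmstb fix works because IS_ENABLED() only evaluates to true for macros defined to 1, as CONFIG_CPU_BIG_ENDIAN is, whereas __BIG_ENDIAN is defined to 4321 by the byteorder headers, so the old test was always false. A simplified userspace re-creation of the macro trick; the real version in include/linux/kconfig.h also handles =m (module) symbols.

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_CPU_BIG_ENDIAN 1
#undef  __BIG_ENDIAN
#define __BIG_ENDIAN 4321

int main(void)
{
        /* Only the CONFIG_ symbol, defined to exactly 1, registers as enabled. */
        printf("IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) = %d\n",
               IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
        printf("IS_ENABLED(__BIG_ENDIAN)          = %d\n",
               IS_ENABLED(__BIG_ENDIAN));
        return 0;
}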
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 614c78f510f0..1befb114c384 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -20,6 +20,8 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/libata.h> 21#include <linux/libata.h>
22#include <linux/ahci_platform.h> 22#include <linux/ahci_platform.h>
23#include <linux/acpi.h>
24#include <linux/pci_ids.h>
23#include "ahci.h" 25#include "ahci.h"
24 26
25#define DRV_NAME "ahci" 27#define DRV_NAME "ahci"
@@ -79,12 +81,19 @@ static const struct of_device_id ahci_of_match[] = {
79}; 81};
80MODULE_DEVICE_TABLE(of, ahci_of_match); 82MODULE_DEVICE_TABLE(of, ahci_of_match);
81 83
84static const struct acpi_device_id ahci_acpi_match[] = {
85 { ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
86 {},
87};
88MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
89
82static struct platform_driver ahci_driver = { 90static struct platform_driver ahci_driver = {
83 .probe = ahci_probe, 91 .probe = ahci_probe,
84 .remove = ata_platform_remove_one, 92 .remove = ata_platform_remove_one,
85 .driver = { 93 .driver = {
86 .name = DRV_NAME, 94 .name = DRV_NAME,
87 .of_match_table = ahci_of_match, 95 .of_match_table = ahci_of_match,
96 .acpi_match_table = ahci_acpi_match,
88 .pm = &ahci_pm_ops, 97 .pm = &ahci_pm_ops,
89 }, 98 },
90}; 99};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..19bcb80b2031 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
694 * RETURNS: 694 * RETURNS:
695 * Block address read from @tf. 695 * Block address read from @tf.
696 */ 696 */
697u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev) 697u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
698{ 698{
699 u64 block = 0; 699 u64 block = 0;
700 700
701 if (!dev || tf->flags & ATA_TFLAG_LBA) { 701 if (tf->flags & ATA_TFLAG_LBA) {
702 if (tf->flags & ATA_TFLAG_LBA48) { 702 if (tf->flags & ATA_TFLAG_LBA48) {
703 block |= (u64)tf->hob_lbah << 40; 703 block |= (u64)tf->hob_lbah << 40;
704 block |= (u64)tf->hob_lbam << 32; 704 block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
2147 return 0; 2147 return 0;
2148} 2148}
2149 2149
2150static void ata_dev_config_sense_reporting(struct ata_device *dev)
2151{
2152 unsigned int err_mask;
2153
2154 if (!ata_id_has_sense_reporting(dev->id))
2155 return;
2156
2157 if (ata_id_sense_reporting_enabled(dev->id))
2158 return;
2159
2160 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2161 if (err_mask) {
2162 ata_dev_dbg(dev,
2163 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2164 err_mask);
2165 }
2166}
2167
2168/** 2150/**
2169 * ata_dev_configure - Configure the specified ATA/ATAPI device 2151 * ata_dev_configure - Configure the specified ATA/ATAPI device
2170 * @dev: Target device to configure 2152 * @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
2387 dev->devslp_timing[i] = sata_setting[j]; 2369 dev->devslp_timing[i] = sata_setting[j];
2388 } 2370 }
2389 } 2371 }
2390 ata_dev_config_sense_reporting(dev); 2372
2391 dev->cdb_len = 16; 2373 dev->cdb_len = 16;
2392 } 2374 }
2393 2375
@@ -2478,6 +2460,10 @@ int ata_dev_configure(struct ata_device *dev)
2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2460 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2479 dev->max_sectors); 2461 dev->max_sectors);
2480 2462
2463 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2464 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2465 dev->max_sectors);
2466
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2467 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2482 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2468 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2483 2469
@@ -4146,6 +4132,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4132 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4133 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4148 4134
4135 /*
4136 * Causes silent data corruption with higher max sects.
4137 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4138 */
4139 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4140
4149 /* Devices we expect to fail diagnostics */ 4141 /* Devices we expect to fail diagnostics */
4150 4142
4151 /* Devices where NCQ should be avoided */ 4143 /* Devices where NCQ should be avoided */
@@ -4174,9 +4166,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4166 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4175 ATA_HORKAGE_FIRMWARE_WARN }, 4167 ATA_HORKAGE_FIRMWARE_WARN },
4176 4168
4177 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4169 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4170 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4171 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4172 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4180 4173
4181 /* Blacklist entries taken from Silicon Image 3124/3132 4174 /* Blacklist entries taken from Silicon Image 3124/3132
4182 Windows driver .inf file - also several Linux problem reports */ 4175 Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4222,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4222 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4223 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4231 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4224 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4232 { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4225 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4233 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4226 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4234 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4227 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4228 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4231,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4231 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4232 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4240 4233
4234 /* devices that don't properly handle TRIM commands */
4235 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4236
4241 /* 4237 /*
4242 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4238 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4243 * (Return Zero After Trim) flags in the ATA Command Set are 4239 * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4497,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4501 else /* In the ancient relic department - skip all of this */ 4497 else /* In the ancient relic department - skip all of this */
4502 return 0; 4498 return 0;
4503 4499
4504 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4500 /* On some disks, this command causes spin-up, so we need longer timeout */
4501 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4505 4502
4506 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4503 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4507 return err_mask; 4504 return err_mask;
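The libata-core changes add an ATA_HORKAGE_MAX_SEC_1024 quirk, applied to the ST380013AS blacklist entry, that caps max_sectors to avoid the silent corruption referenced in the new comment, alongside the longer SET XFERMODE timeout. A userspace sketch of the clamp; the constants here mimic, but are not, the <linux/ata.h> definitions.

#include <stdio.h>

#define ATA_MAX_SECTORS_1024    1024
#define HORKAGE_MAX_SEC_1024    0x1

/* Quirky drives get their transfer size capped before queue setup. */
static unsigned int clamp_max_sectors(unsigned int max_sectors,
                                      unsigned int horkage)
{
        if ((horkage & HORKAGE_MAX_SEC_1024) && max_sectors > ATA_MAX_SECTORS_1024)
                max_sectors = ATA_MAX_SECTORS_1024;
        return max_sectors;
}

int main(void)
{
        printf("%u\n", clamp_max_sectors(2048, HORKAGE_MAX_SEC_1024)); /* 1024 */
        printf("%u\n", clamp_max_sectors(2048, 0));                    /* 2048 */
        return 0;
}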
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031a893c..cb0508af1459 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
1592 tf->hob_lbah = buf[10]; 1592 tf->hob_lbah = buf[10];
1593 tf->nsect = buf[12]; 1593 tf->nsect = buf[12];
1594 tf->hob_nsect = buf[13]; 1594 tf->hob_nsect = buf[13];
1595 if (ata_id_has_ncq_autosense(dev->id))
1596 tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1597 1595
1598 return 0; 1596 return 0;
1599} 1597}
@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
1630} 1628}
1631 1629
1632/** 1630/**
1633 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
1634 * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
1635 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
1636 * @dfl_sense_key: default sense key to use
1637 *
1638 * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
1639 * SENSE. This function is EH helper.
1640 *
1641 * LOCKING:
1642 * Kernel thread context (may sleep).
1643 *
1644 * RETURNS:
1645 * encoded sense data on success, 0 on failure or if sense data
1646 * is not available.
1647 */
1648static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
1649 struct scsi_cmnd *cmd)
1650{
1651 struct ata_device *dev = qc->dev;
1652 struct ata_taskfile tf;
1653 unsigned int err_mask;
1654
1655 if (!cmd)
1656 return 0;
1657
1658 DPRINTK("ATA request sense\n");
1659 ata_dev_warn(dev, "request sense\n");
1660 if (!ata_id_sense_reporting_enabled(dev->id)) {
1661 ata_dev_warn(qc->dev, "sense data reporting disabled\n");
1662 return 0;
1663 }
1664 ata_tf_init(dev, &tf);
1665
1666 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1667 tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1668 tf.command = ATA_CMD_REQ_SENSE_DATA;
1669 tf.protocol = ATA_PROT_NODATA;
1670
1671 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1672 /*
1673 * ACS-4 states:
1674 * The device may set the SENSE DATA AVAILABLE bit to one in the
1675 * STATUS field and clear the ERROR bit to zero in the STATUS field
1676 * to indicate that the command returned completion without an error
1677 * and the sense data described in table 306 is available.
1678 *
1679 * IOW the 'ATA_SENSE' bit might not be set even though valid
1680 * sense data is available.
1681 * So check for both.
1682 */
1683 if ((tf.command & ATA_SENSE) ||
1684 tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
1685 ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
1686 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1687 ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
1688 tf.lbah, tf.lbam, tf.lbal);
1689 } else {
1690 ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
1691 tf.command, err_mask);
1692 }
1693 return err_mask;
1694}
1695
1696/**
1697 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE 1631 * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
1698 * @dev: device to perform REQUEST_SENSE to 1632 * @dev: device to perform REQUEST_SENSE to
1699 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long) 1633 * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1855 memcpy(&qc->result_tf, &tf, sizeof(tf)); 1789 memcpy(&qc->result_tf, &tf, sizeof(tf));
1856 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 1790 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1857 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ; 1791 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1858 if (qc->result_tf.auxiliary) {
1859 char sense_key, asc, ascq;
1860
1861 sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1862 asc = (qc->result_tf.auxiliary >> 8) & 0xff;
1863 ascq = qc->result_tf.auxiliary & 0xff;
1864 ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
1865 sense_key, asc, ascq);
1866 ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
1867 ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
1868 qc->flags |= ATA_QCFLAG_SENSE_VALID;
1869 }
1870
1871 ehc->i.err_mask &= ~AC_ERR_DEV; 1792 ehc->i.err_mask &= ~AC_ERR_DEV;
1872} 1793}
1873 1794
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1897 return ATA_EH_RESET; 1818 return ATA_EH_RESET;
1898 } 1819 }
1899 1820
1900 /*
1901 * Sense data reporting does not work if the
1902 * device fault bit is set.
1903 */
1904 if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
1905 !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
1906 if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
1907 tmp = ata_eh_request_sense(qc, qc->scsicmd);
1908 if (tmp)
1909 qc->err_mask |= tmp;
1910 else
1911 ata_scsi_set_sense_information(qc->scsicmd, tf);
1912 } else {
1913 ata_dev_warn(qc->dev, "sense data available but port frozen\n");
1914 }
1915 }
1916
1917 /* Set by NCQ autosense or request sense above */
1918 if (qc->flags & ATA_QCFLAG_SENSE_VALID)
1919 return 0;
1920
1921 if (stat & (ATA_ERR | ATA_DF)) 1821 if (stat & (ATA_ERR | ATA_DF))
1922 qc->err_mask |= AC_ERR_DEV; 1822 qc->err_mask |= AC_ERR_DEV;
1923 else 1823 else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
2661 2561
2662#ifdef CONFIG_ATA_VERBOSE_ERROR 2562#ifdef CONFIG_ATA_VERBOSE_ERROR
2663 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2563 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2664 ATA_SENSE | ATA_ERR)) { 2564 ATA_ERR)) {
2665 if (res->command & ATA_BUSY) 2565 if (res->command & ATA_BUSY)
2666 ata_dev_err(qc->dev, "status: { Busy }\n"); 2566 ata_dev_err(qc->dev, "status: { Busy }\n");
2667 else 2567 else
2668 ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n", 2568 ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
2669 res->command & ATA_DRDY ? "DRDY " : "", 2569 res->command & ATA_DRDY ? "DRDY " : "",
2670 res->command & ATA_DF ? "DF " : "", 2570 res->command & ATA_DF ? "DF " : "",
2671 res->command & ATA_DRQ ? "DRQ " : "", 2571 res->command & ATA_DRQ ? "DRQ " : "",
2672 res->command & ATA_SENSE ? "SENSE " : "",
2673 res->command & ATA_ERR ? "ERR " : ""); 2572 res->command & ATA_ERR ? "ERR " : "");
2674 } 2573 }
2675 2574
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
460 ATA_LFLAG_NO_SRST | 460 ATA_LFLAG_NO_SRST |
461 ATA_LFLAG_ASSUME_ATA; 461 ATA_LFLAG_ASSUME_ATA;
462 } 462 }
463 } else if (vendor == 0x11ab && devid == 0x4140) {
464 /* Marvell 4140 quirks */
465 ata_for_each_link(link, ap, EDGE) {
466 /* port 4 is for SEMB device and it doesn't like SRST */
467 if (link->pmp == 4)
468 link->flags |= ATA_LFLAG_DISABLED;
469 }
463 } 470 }
464} 471}
465 472
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..0d7f0da3a269 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
270 ata_scsi_park_show, ata_scsi_park_store); 270 ata_scsi_park_show, ata_scsi_park_store);
271EXPORT_SYMBOL_GPL(dev_attr_unload_heads); 271EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
272 272
273void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 273static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
274{ 274{
275 if (!cmd)
276 return;
277
278 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 275 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
279 276
280 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); 277 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
281} 278}
282 279
283void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
284 const struct ata_taskfile *tf)
285{
286 u64 information;
287
288 if (!cmd)
289 return;
290
291 information = ata_tf_read_block(tf, NULL);
292 scsi_set_sense_information(cmd->sense_buffer, information);
293}
294
295static ssize_t 280static ssize_t
296ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, 281ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
297 const char *buf, size_t count) 282 const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1792 ((cdb[2] & 0x20) || need_sense)) { 1777 ((cdb[2] & 0x20) || need_sense)) {
1793 ata_gen_passthru_sense(qc); 1778 ata_gen_passthru_sense(qc);
1794 } else { 1779 } else {
1795 if (qc->flags & ATA_QCFLAG_SENSE_VALID) { 1780 if (!need_sense) {
1796 cmd->result = SAM_STAT_CHECK_CONDITION;
1797 } else if (!need_sense) {
1798 cmd->result = SAM_STAT_GOOD; 1781 cmd->result = SAM_STAT_GOOD;
1799 } else { 1782 } else {
1800 /* TODO: decide which descriptor format to use 1783 /* TODO: decide which descriptor format to use
@@ -2568,7 +2551,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2551 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2569 rbuf[15] = lowest_aligned; 2552 rbuf[15] = lowest_aligned;
2570 2553
2571 if (ata_id_has_trim(args->id)) { 2554 if (ata_id_has_trim(args->id) &&
2555 !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
2572 rbuf[14] |= 0x80; /* LBPME */ 2556 rbuf[14] |= 0x80; /* LBPME */
2573 2557
2574 if (ata_id_has_zero_after_trim(args->id) && 2558 if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
569 569
570 if (!ata_id_has_trim(ata_dev->id)) 570 if (!ata_id_has_trim(ata_dev->id))
571 mode = "unsupported"; 571 mode = "unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
573 mode = "forced_unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) 574 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
573 mode = "forced_unqueued"; 575 mode = "forced_unqueued";
574 else if (ata_fpdma_dsm_supported(ata_dev)) 576 else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a175f9f1..f840ca18a7c0 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
67extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 67extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
68 u64 block, u32 n_block, unsigned int tf_flags, 68 u64 block, u32 n_block, unsigned int tf_flags,
69 unsigned int tag); 69 unsigned int tag);
70extern u64 ata_tf_read_block(const struct ata_taskfile *tf, 70extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
71 struct ata_device *dev);
72extern unsigned ata_exec_internal(struct ata_device *dev, 71extern unsigned ata_exec_internal(struct ata_device *dev,
73 struct ata_taskfile *tf, const u8 *cdb, 72 struct ata_taskfile *tf, const u8 *cdb,
74 int dma_dir, void *buf, unsigned int buflen, 73 int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
138 struct scsi_host_template *sht); 137 struct scsi_host_template *sht);
139extern void ata_scsi_scan_host(struct ata_port *ap, int sync); 138extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
140extern int ata_scsi_offline_dev(struct ata_device *dev); 139extern int ata_scsi_offline_dev(struct ata_device *dev);
141extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
142extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
143 const struct ata_taskfile *tf);
144extern void ata_scsi_media_change_notify(struct ata_device *dev); 140extern void ata_scsi_media_change_notify(struct ata_device *dev);
145extern void ata_scsi_hotplug(struct work_struct *work); 141extern void ata_scsi_hotplug(struct work_struct *work);
146extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); 142extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index a9b0c820f2eb..5d9ee99c2148 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller source file 4 * Arasan Compact Flash host controller source file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -968,7 +968,7 @@ static struct platform_driver arasan_cf_driver = {
968 968
969module_platform_driver(arasan_cf_driver); 969module_platform_driver(arasan_cf_driver);
970 970
971MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 971MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
972MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); 972MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
973MODULE_LICENSE("GPL"); 973MODULE_LICENSE("GPL");
974MODULE_ALIAS("platform:" DRIVER_NAME); 974MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a719b4..fab504fd9cfd 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1238 readl(mmio + PDC_SDRAM_CONTROL); 1238 readl(mmio + PDC_SDRAM_CONTROL);
1239 1239
1240 /* Turn on for ECC */ 1240 /* Turn on for ECC */
1241 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1241 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1242 PDC_DIMM_SPD_TYPE, &spd0); 1242 PDC_DIMM_SPD_TYPE, &spd0)) {
1243 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1244 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1245 return 1;
1246 }
1243 if (spd0 == 0x02) { 1247 if (spd0 == 0x02) {
1244 data |= (0x01 << 16); 1248 data |= (0x01 << 16);
1245 writel(data, mmio + PDC_SDRAM_CONTROL); 1249 writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
1380 1384
1381 /* ECC initiliazation. */ 1385 /* ECC initiliazation. */
1382 1386
1383 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 1387 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1384 PDC_DIMM_SPD_TYPE, &spd0); 1388 PDC_DIMM_SPD_TYPE, &spd0)) {
1389 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1390 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1391 return 1;
1392 }
1385 if (spd0 == 0x02) { 1393 if (spd0 == 0x02) {
1386 void *buf; 1394 void *buf;
1387 VPRINTK("Start ECC initialization\n"); 1395 VPRINTK("Start ECC initialization\n");
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 9c4288362a8e..894bda114224 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -563,10 +563,8 @@ static void fw_dev_release(struct device *dev)
563 kfree(fw_priv); 563 kfree(fw_priv);
564} 564}
565 565
566static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) 566static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
567{ 567{
568 struct firmware_priv *fw_priv = to_firmware_priv(dev);
569
570 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id)) 568 if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
571 return -ENOMEM; 569 return -ENOMEM;
572 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout)) 570 if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
@@ -577,6 +575,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
577 return 0; 575 return 0;
578} 576}
579 577
578static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
579{
580 struct firmware_priv *fw_priv = to_firmware_priv(dev);
581 int err = 0;
582
583 mutex_lock(&fw_lock);
584 if (fw_priv->buf)
585 err = do_firmware_uevent(fw_priv, env);
586 mutex_unlock(&fw_lock);
587 return err;
588}
589
580static struct class firmware_class = { 590static struct class firmware_class = {
581 .name = "firmware", 591 .name = "firmware",
582 .class_attrs = firmware_class_attrs, 592 .class_attrs = firmware_class_attrs,
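The firmware_class change splits the uevent handler so the environment variables are only read from fw_priv->buf while fw_lock is held, closing a race with the buffer being freed underneath the uevent. A userspace sketch of the idea using a pthread mutex; fw_lock and fw_id here are stand-ins for the kernel's fw_lock and fw_priv->buf.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fw_lock = PTHREAD_MUTEX_INITIALIZER;
static const char *fw_id = "demo-firmware.bin";   /* NULL once torn down */

/* Only touch the shared state while holding the lock the teardown path takes. */
static int emit_uevent(void)
{
        pthread_mutex_lock(&fw_lock);
        if (fw_id)
                printf("FIRMWARE=%s\n", fw_id);
        pthread_mutex_unlock(&fw_lock);
        return 0;
}

int main(void)
{
        return emit_uevent();
}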
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index cdd547bd67df..0ee43c1056e0 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,7 @@
6 * This file is released under the GPLv2. 6 * This file is released under the GPLv2.
7 */ 7 */
8 8
9#include <linux/delay.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
@@ -19,6 +20,8 @@
19#include <linux/suspend.h> 20#include <linux/suspend.h>
20#include <linux/export.h> 21#include <linux/export.h>
21 22
23#define GENPD_RETRY_MAX_MS 250 /* Approximate */
24
22#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ 25#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
23({ \ 26({ \
24 type (*__routine)(struct device *__d); \ 27 type (*__routine)(struct device *__d); \
@@ -2131,6 +2134,7 @@ EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
2131static void genpd_dev_pm_detach(struct device *dev, bool power_off) 2134static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2132{ 2135{
2133 struct generic_pm_domain *pd; 2136 struct generic_pm_domain *pd;
2137 unsigned int i;
2134 int ret = 0; 2138 int ret = 0;
2135 2139
2136 pd = pm_genpd_lookup_dev(dev); 2140 pd = pm_genpd_lookup_dev(dev);
@@ -2139,10 +2143,12 @@ static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2139 2143
2140 dev_dbg(dev, "removing from PM domain %s\n", pd->name); 2144 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2141 2145
2142 while (1) { 2146 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2143 ret = pm_genpd_remove_device(pd, dev); 2147 ret = pm_genpd_remove_device(pd, dev);
2144 if (ret != -EAGAIN) 2148 if (ret != -EAGAIN)
2145 break; 2149 break;
2150
2151 mdelay(i);
2146 cond_resched(); 2152 cond_resched();
2147 } 2153 }
2148 2154
@@ -2183,6 +2189,7 @@ int genpd_dev_pm_attach(struct device *dev)
2183{ 2189{
2184 struct of_phandle_args pd_args; 2190 struct of_phandle_args pd_args;
2185 struct generic_pm_domain *pd; 2191 struct generic_pm_domain *pd;
2192 unsigned int i;
2186 int ret; 2193 int ret;
2187 2194
2188 if (!dev->of_node) 2195 if (!dev->of_node)
@@ -2218,10 +2225,12 @@ int genpd_dev_pm_attach(struct device *dev)
2218 2225
2219 dev_dbg(dev, "adding to PM domain %s\n", pd->name); 2226 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2220 2227
2221 while (1) { 2228 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
2222 ret = pm_genpd_add_device(pd, dev); 2229 ret = pm_genpd_add_device(pd, dev);
2223 if (ret != -EAGAIN) 2230 if (ret != -EAGAIN)
2224 break; 2231 break;
2232
2233 mdelay(i);
2225 cond_resched(); 2234 cond_resched();
2226 } 2235 }
2227 2236
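genpd_dev_pm_attach() and the matching detach path now retry with a doubling mdelay() instead of spinning on -EAGAIN forever, bounded by the new GENPD_RETRY_MAX_MS. A userspace sketch of that bounded exponential backoff; try_detach() is a stand-in for pm_genpd_remove_device().

#include <errno.h>
#include <stdio.h>

#define GENPD_RETRY_MAX_MS 250  /* same bound the patch introduces */

/* Stand-in for pm_genpd_remove_device(): fails with -EAGAIN a few times. */
static int try_detach(int *attempts_left)
{
        return (*attempts_left)-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
        int attempts_left = 3, ret = 0;
        unsigned int i, waited_ms = 0;

        /* The doubling step bounds the total wait instead of looping forever. */
        for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
                ret = try_detach(&attempts_left);
                if (ret != -EAGAIN)
                        break;
                waited_ms += i;         /* the kernel calls mdelay(i) here */
        }
        printf("ret=%d after ~%u ms of backoff\n", ret, waited_ms);
        return 0;
}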
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 7470004ca810..eb6e67451dec 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -45,14 +45,12 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
45 return -EEXIST; 45 return -EEXIST;
46 } 46 }
47 47
48 dev->power.wakeirq = wirq;
49 spin_unlock_irqrestore(&dev->power.lock, flags);
50
51 err = device_wakeup_attach_irq(dev, wirq); 48 err = device_wakeup_attach_irq(dev, wirq);
52 if (err) 49 if (!err)
53 return err; 50 dev->power.wakeirq = wirq;
54 51
55 return 0; 52 spin_unlock_irqrestore(&dev->power.lock, flags);
53 return err;
56} 54}
57 55
58/** 56/**
@@ -105,10 +103,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
105 return; 103 return;
106 104
107 spin_lock_irqsave(&dev->power.lock, flags); 105 spin_lock_irqsave(&dev->power.lock, flags);
106 device_wakeup_detach_irq(dev);
108 dev->power.wakeirq = NULL; 107 dev->power.wakeirq = NULL;
109 spin_unlock_irqrestore(&dev->power.lock, flags); 108 spin_unlock_irqrestore(&dev->power.lock, flags);
110 109
111 device_wakeup_detach_irq(dev);
112 if (wirq->dedicated_irq) 110 if (wirq->dedicated_irq)
113 free_irq(wirq->irq, wirq); 111 free_irq(wirq->irq, wirq);
114 kfree(wirq); 112 kfree(wirq);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 40f71603378c..51f15bc15774 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -281,32 +281,25 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
281 * Attach a device wakeirq to the wakeup source so the device 281 * Attach a device wakeirq to the wakeup source so the device
282 * wake IRQ can be configured automatically for suspend and 282 * wake IRQ can be configured automatically for suspend and
283 * resume. 283 * resume.
284 *
285 * Call under the device's power.lock lock.
284 */ 286 */
285int device_wakeup_attach_irq(struct device *dev, 287int device_wakeup_attach_irq(struct device *dev,
286 struct wake_irq *wakeirq) 288 struct wake_irq *wakeirq)
287{ 289{
288 struct wakeup_source *ws; 290 struct wakeup_source *ws;
289 int ret = 0;
290 291
291 spin_lock_irq(&dev->power.lock);
292 ws = dev->power.wakeup; 292 ws = dev->power.wakeup;
293 if (!ws) { 293 if (!ws) {
294 dev_err(dev, "forgot to call call device_init_wakeup?\n"); 294 dev_err(dev, "forgot to call call device_init_wakeup?\n");
295 ret = -EINVAL; 295 return -EINVAL;
296 goto unlock;
297 } 296 }
298 297
299 if (ws->wakeirq) { 298 if (ws->wakeirq)
300 ret = -EEXIST; 299 return -EEXIST;
301 goto unlock;
302 }
303 300
304 ws->wakeirq = wakeirq; 301 ws->wakeirq = wakeirq;
305 302 return 0;
306unlock:
307 spin_unlock_irq(&dev->power.lock);
308
309 return ret;
310} 303}
311 304
312/** 305/**
@@ -314,20 +307,16 @@ unlock:
314 * @dev: Device to handle 307 * @dev: Device to handle
315 * 308 *
316 * Removes a device wakeirq from the wakeup source. 309 * Removes a device wakeirq from the wakeup source.
310 *
311 * Call under the device's power.lock lock.
317 */ 312 */
318void device_wakeup_detach_irq(struct device *dev) 313void device_wakeup_detach_irq(struct device *dev)
319{ 314{
320 struct wakeup_source *ws; 315 struct wakeup_source *ws;
321 316
322 spin_lock_irq(&dev->power.lock);
323 ws = dev->power.wakeup; 317 ws = dev->power.wakeup;
324 if (!ws) 318 if (ws)
325 goto unlock; 319 ws->wakeirq = NULL;
326
327 ws->wakeirq = NULL;
328
329unlock:
330 spin_unlock_irq(&dev->power.lock);
331} 320}
332 321
333/** 322/**
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849fc6d3..873ddf91c9d3 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,7 +136,7 @@ struct regmap {
136 /* if set, the HW registers are known to match map->reg_defaults */ 136 /* if set, the HW registers are known to match map->reg_defaults */
137 bool no_sync_defaults; 137 bool no_sync_defaults;
138 138
139 struct reg_default *patch; 139 struct reg_sequence *patch;
140 int patch_regs; 140 int patch_regs;
141 141
142 /* if set, converts bulk rw to single rw */ 142 /* if set, converts bulk rw to single rw */
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
296 if (!blk) 296 if (!blk)
297 return -ENOMEM; 297 return -ENOMEM;
298 298
299 present = krealloc(rbnode->cache_present, 299 if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
300 BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); 300 present = krealloc(rbnode->cache_present,
301 if (!present) { 301 BITS_TO_LONGS(blklen) * sizeof(*present),
302 kfree(blk); 302 GFP_KERNEL);
303 return -ENOMEM; 303 if (!present) {
304 kfree(blk);
305 return -ENOMEM;
306 }
307
308 memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
309 (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
310 * sizeof(*present));
311 } else {
312 present = rbnode->cache_present;
304 } 313 }
305 314
306 /* insert the register value in the correct place in the rbnode block */ 315 /* insert the register value in the correct place in the rbnode block */
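The regcache-rbtree fix only reallocates the cache_present bitmap when the grown block really needs more longs, and then zeroes just the freshly added tail so stale heap contents can never masquerade as cached registers. A userspace sketch of grow-and-zero with plain realloc(); BITS_TO_LONGS is re-derived here rather than taken from kernel headers.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_TO_LONGS(n) (((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

int main(void)
{
        size_t old_bits = 64, new_bits = 200;
        unsigned long *present = calloc(BITS_TO_LONGS(old_bits), sizeof(*present));

        if (!present)
                return 1;

        /* Grow only when the long count actually increases, then zero the tail. */
        if (BITS_TO_LONGS(new_bits) > BITS_TO_LONGS(old_bits)) {
                unsigned long *tmp = realloc(present,
                                             BITS_TO_LONGS(new_bits) * sizeof(*present));
                if (!tmp) {
                        free(present);
                        return 1;
                }
                present = tmp;
                memset(present + BITS_TO_LONGS(old_bits), 0,
                       (BITS_TO_LONGS(new_bits) - BITS_TO_LONGS(old_bits)) *
                       sizeof(*present));
        }

        printf("bitmap now spans %zu longs\n", BITS_TO_LONGS(new_bits));
        free(present);
        return 0;
}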
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7111d04f2621..0a849eeaf952 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -34,7 +34,7 @@
34 34
35static int _regmap_update_bits(struct regmap *map, unsigned int reg, 35static int _regmap_update_bits(struct regmap *map, unsigned int reg,
36 unsigned int mask, unsigned int val, 36 unsigned int mask, unsigned int val,
37 bool *change); 37 bool *change, bool force_write);
38 38
39static int _regmap_bus_reg_read(void *context, unsigned int reg, 39static int _regmap_bus_reg_read(void *context, unsigned int reg,
40 unsigned int *val); 40 unsigned int *val);
@@ -1178,7 +1178,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1178 ret = _regmap_update_bits(map, range->selector_reg, 1178 ret = _regmap_update_bits(map, range->selector_reg,
1179 range->selector_mask, 1179 range->selector_mask,
1180 win_page << range->selector_shift, 1180 win_page << range->selector_shift,
1181 &page_chg); 1181 &page_chg, false);
1182 1182
1183 map->work_buf = orig_work_buf; 1183 map->work_buf = orig_work_buf;
1184 1184
@@ -1624,6 +1624,18 @@ int regmap_fields_write(struct regmap_field *field, unsigned int id,
1624} 1624}
1625EXPORT_SYMBOL_GPL(regmap_fields_write); 1625EXPORT_SYMBOL_GPL(regmap_fields_write);
1626 1626
1627int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
1628 unsigned int val)
1629{
1630 if (id >= field->id_size)
1631 return -EINVAL;
1632
1633 return regmap_write_bits(field->regmap,
1634 field->reg + (field->id_offset * id),
1635 field->mask, val << field->shift);
1636}
1637EXPORT_SYMBOL_GPL(regmap_fields_force_write);
1638
1627/** 1639/**
1628 * regmap_fields_update_bits(): Perform a read/modify/write cycle 1640 * regmap_fields_update_bits(): Perform a read/modify/write cycle
1629 * on the register field 1641 * on the register field
@@ -1743,7 +1755,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
1743 * relative. The page register has been written if that was neccessary. 1755 * relative. The page register has been written if that was neccessary.
1744 */ 1756 */
1745static int _regmap_raw_multi_reg_write(struct regmap *map, 1757static int _regmap_raw_multi_reg_write(struct regmap *map,
1746 const struct reg_default *regs, 1758 const struct reg_sequence *regs,
1747 size_t num_regs) 1759 size_t num_regs)
1748{ 1760{
1749 int ret; 1761 int ret;
@@ -1800,12 +1812,12 @@ static unsigned int _regmap_register_page(struct regmap *map,
1800} 1812}
1801 1813
1802static int _regmap_range_multi_paged_reg_write(struct regmap *map, 1814static int _regmap_range_multi_paged_reg_write(struct regmap *map,
1803 struct reg_default *regs, 1815 struct reg_sequence *regs,
1804 size_t num_regs) 1816 size_t num_regs)
1805{ 1817{
1806 int ret; 1818 int ret;
1807 int i, n; 1819 int i, n;
1808 struct reg_default *base; 1820 struct reg_sequence *base;
1809 unsigned int this_page = 0; 1821 unsigned int this_page = 0;
1810 /* 1822 /*
1811 * the set of registers are not neccessarily in order, but 1823 * the set of registers are not neccessarily in order, but
@@ -1843,7 +1855,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
1843} 1855}
1844 1856
1845static int _regmap_multi_reg_write(struct regmap *map, 1857static int _regmap_multi_reg_write(struct regmap *map,
1846 const struct reg_default *regs, 1858 const struct reg_sequence *regs,
1847 size_t num_regs) 1859 size_t num_regs)
1848{ 1860{
1849 int i; 1861 int i;
@@ -1895,8 +1907,8 @@ static int _regmap_multi_reg_write(struct regmap *map,
1895 struct regmap_range_node *range; 1907 struct regmap_range_node *range;
1896 range = _regmap_range_lookup(map, reg); 1908 range = _regmap_range_lookup(map, reg);
1897 if (range) { 1909 if (range) {
1898 size_t len = sizeof(struct reg_default)*num_regs; 1910 size_t len = sizeof(struct reg_sequence)*num_regs;
1899 struct reg_default *base = kmemdup(regs, len, 1911 struct reg_sequence *base = kmemdup(regs, len,
1900 GFP_KERNEL); 1912 GFP_KERNEL);
1901 if (!base) 1913 if (!base)
1902 return -ENOMEM; 1914 return -ENOMEM;
@@ -1929,7 +1941,7 @@ static int _regmap_multi_reg_write(struct regmap *map,
1929 * A value of zero will be returned on success, a negative errno will be 1941 * A value of zero will be returned on success, a negative errno will be
1930 * returned in error cases. 1942 * returned in error cases.
1931 */ 1943 */
1932int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, 1944int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
1933 int num_regs) 1945 int num_regs)
1934{ 1946{
1935 int ret; 1947 int ret;
@@ -1962,7 +1974,7 @@ EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
1962 * be returned in error cases. 1974 * be returned in error cases.
1963 */ 1975 */
1964int regmap_multi_reg_write_bypassed(struct regmap *map, 1976int regmap_multi_reg_write_bypassed(struct regmap *map,
1965 const struct reg_default *regs, 1977 const struct reg_sequence *regs,
1966 int num_regs) 1978 int num_regs)
1967{ 1979{
1968 int ret; 1980 int ret;
@@ -2327,7 +2339,7 @@ EXPORT_SYMBOL_GPL(regmap_bulk_read);
2327 2339
2328static int _regmap_update_bits(struct regmap *map, unsigned int reg, 2340static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2329 unsigned int mask, unsigned int val, 2341 unsigned int mask, unsigned int val,
2330 bool *change) 2342 bool *change, bool force_write)
2331{ 2343{
2332 int ret; 2344 int ret;
2333 unsigned int tmp, orig; 2345 unsigned int tmp, orig;
@@ -2339,7 +2351,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2339 tmp = orig & ~mask; 2351 tmp = orig & ~mask;
2340 tmp |= val & mask; 2352 tmp |= val & mask;
2341 2353
2342 if (tmp != orig) { 2354 if (force_write || (tmp != orig)) {
2343 ret = _regmap_write(map, reg, tmp); 2355 ret = _regmap_write(map, reg, tmp);
2344 if (change) 2356 if (change)
2345 *change = true; 2357 *change = true;
@@ -2367,7 +2379,7 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
2367 int ret; 2379 int ret;
2368 2380
2369 map->lock(map->lock_arg); 2381 map->lock(map->lock_arg);
2370 ret = _regmap_update_bits(map, reg, mask, val, NULL); 2382 ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
2371 map->unlock(map->lock_arg); 2383 map->unlock(map->lock_arg);
2372 2384
2373 return ret; 2385 return ret;
@@ -2375,6 +2387,29 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
2375EXPORT_SYMBOL_GPL(regmap_update_bits); 2387EXPORT_SYMBOL_GPL(regmap_update_bits);
2376 2388
2377/** 2389/**
2390 * regmap_write_bits: Perform a read/modify/write cycle on the register map
2391 *
2392 * @map: Register map to update
2393 * @reg: Register to update
2394 * @mask: Bitmask to change
2395 * @val: New value for bitmask
2396 *
2397 * Returns zero for success, a negative number on error.
2398 */
2399int regmap_write_bits(struct regmap *map, unsigned int reg,
2400 unsigned int mask, unsigned int val)
2401{
2402 int ret;
2403
2404 map->lock(map->lock_arg);
2405 ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
2406 map->unlock(map->lock_arg);
2407
2408 return ret;
2409}
2410EXPORT_SYMBOL_GPL(regmap_write_bits);
2411
2412/**
2378 * regmap_update_bits_async: Perform a read/modify/write cycle on the register 2413 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
2379 * map asynchronously 2414 * map asynchronously
2380 * 2415 *
@@ -2398,7 +2433,7 @@ int regmap_update_bits_async(struct regmap *map, unsigned int reg,
2398 2433
2399 map->async = true; 2434 map->async = true;
2400 2435
2401 ret = _regmap_update_bits(map, reg, mask, val, NULL); 2436 ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
2402 2437
2403 map->async = false; 2438 map->async = false;
2404 2439
@@ -2427,7 +2462,7 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
2427 int ret; 2462 int ret;
2428 2463
2429 map->lock(map->lock_arg); 2464 map->lock(map->lock_arg);
2430 ret = _regmap_update_bits(map, reg, mask, val, change); 2465 ret = _regmap_update_bits(map, reg, mask, val, change, false);
2431 map->unlock(map->lock_arg); 2466 map->unlock(map->lock_arg);
2432 return ret; 2467 return ret;
2433} 2468}
@@ -2460,7 +2495,7 @@ int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
2460 2495
2461 map->async = true; 2496 map->async = true;
2462 2497
2463 ret = _regmap_update_bits(map, reg, mask, val, change); 2498 ret = _regmap_update_bits(map, reg, mask, val, change, false);
2464 2499
2465 map->async = false; 2500 map->async = false;
2466 2501
@@ -2552,10 +2587,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
2552 * The caller must ensure that this function cannot be called 2587 * The caller must ensure that this function cannot be called
2553 * concurrently with either itself or regcache_sync(). 2588 * concurrently with either itself or regcache_sync().
2554 */ 2589 */
2555int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 2590int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
2556 int num_regs) 2591 int num_regs)
2557{ 2592{
2558 struct reg_default *p; 2593 struct reg_sequence *p;
2559 int ret; 2594 int ret;
2560 bool bypass; 2595 bool bypass;
2561 2596
@@ -2564,7 +2599,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
2564 return 0; 2599 return 0;
2565 2600
2566 p = krealloc(map->patch, 2601 p = krealloc(map->patch,
2567 sizeof(struct reg_default) * (map->patch_regs + num_regs), 2602 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
2568 GFP_KERNEL); 2603 GFP_KERNEL);
2569 if (p) { 2604 if (p) {
2570 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); 2605 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
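The regmap changes thread a force_write flag through _regmap_update_bits() so the new regmap_write_bits() and regmap_fields_force_write() can issue the write even when the read-modify-write result equals the current value, which matters for write-to-clear and self-clearing bits. A condensed userspace model of that decision; hw_reg, reg_write() and update_bits() are illustrative names only.

#include <stdbool.h>
#include <stdio.h>

static unsigned int hw_reg = 0x12;      /* pretend hardware register */
static unsigned int writes;

static int reg_write(unsigned int val)
{
        hw_reg = val;
        writes++;
        return 0;
}

/* The force_write flag bypasses the "value unchanged, skip the bus write" shortcut. */
static int update_bits(unsigned int mask, unsigned int val, bool force_write)
{
        unsigned int tmp = (hw_reg & ~mask) | (val & mask);

        if (force_write || tmp != hw_reg)
                return reg_write(tmp);
        return 0;
}

int main(void)
{
        update_bits(0xff, 0x12, false); /* no change -> skipped */
        update_bits(0xff, 0x12, true);  /* forced    -> written */
        printf("writes issued: %u\n", writes);  /* 1 */
        return 0;
}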
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
240 while ((entry = llist_del_all(&cq->list)) != NULL) { 240 while ((entry = llist_del_all(&cq->list)) != NULL) {
241 entry = llist_reverse_order(entry); 241 entry = llist_reverse_order(entry);
242 do { 242 do {
243 struct request_queue *q = NULL;
244
243 cmd = container_of(entry, struct nullb_cmd, ll_list); 245 cmd = container_of(entry, struct nullb_cmd, ll_list);
244 entry = entry->next; 246 entry = entry->next;
247 if (cmd->rq)
248 q = cmd->rq->q;
245 end_cmd(cmd); 249 end_cmd(cmd);
246 250
247 if (cmd->rq) { 251 if (q && !q->mq_ops && blk_queue_stopped(q)) {
248 struct request_queue *q = cmd->rq->q; 252 spin_lock(q->queue_lock);
249 253 if (blk_queue_stopped(q))
250 if (!q->mq_ops && blk_queue_stopped(q)) { 254 blk_start_queue(q);
251 spin_lock(q->queue_lock); 255 spin_unlock(q->queue_lock);
252 if (blk_queue_stopped(q))
253 blk_start_queue(q);
254 spin_unlock(q->queue_lock);
255 }
256 } 256 }
257 } while (entry); 257 } while (entry);
258 } 258 }
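The null_blk fix reads cmd->rq->q into a local queue pointer before end_cmd() completes and frees the command, so the later queue-restart check no longer dereferences freed memory. A userspace caricature of saving what is needed before freeing; the struct names are invented for the example.

#include <stdio.h>
#include <stdlib.h>

struct queue { int stopped; };
struct request { struct queue *q; };
struct cmd { struct request *rq; };

static void end_cmd(struct cmd *cmd)
{
        free(cmd->rq);
        free(cmd);      /* cmd and cmd->rq must not be touched after this */
}

int main(void)
{
        struct queue q = { .stopped = 1 };
        struct request *rq = calloc(1, sizeof(*rq));
        struct cmd *cmd = calloc(1, sizeof(*cmd));
        struct queue *qp = NULL;

        if (!rq || !cmd)
                return 1;
        rq->q = &q;
        cmd->rq = rq;

        /* Save the queue pointer before completing the command, as the patch does. */
        if (cmd->rq)
                qp = cmd->rq->q;
        end_cmd(cmd);

        if (qp && qp->stopped)
                printf("restarting stopped queue\n");
        return 0;
}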
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index d1d6141920d3..7920c2741b47 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
2108 goto out_free_disk; 2108 goto out_free_disk;
2109 2109
2110 add_disk(ns->disk); 2110 add_disk(ns->disk);
2111 if (ns->ms) 2111 if (ns->ms) {
2112 revalidate_disk(ns->disk); 2112 struct block_device *bd = bdget_disk(ns->disk, 0);
2113 if (!bd)
2114 return;
2115 if (blkdev_get(bd, FMODE_READ, NULL)) {
2116 bdput(bd);
2117 return;
2118 }
2119 blkdev_reread_part(bd);
2120 blkdev_put(bd, FMODE_READ);
2121 }
2113 return; 2122 return;
2114 out_free_disk: 2123 out_free_disk:
2115 kfree(disk); 2124 kfree(disk);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..bc67a93aa4f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
523# define rbd_assert(expr) ((void) 0) 523# define rbd_assert(expr) ((void) 0)
524#endif /* !RBD_DEBUG */ 524#endif /* !RBD_DEBUG */
525 525
526static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
526static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); 527static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
527static void rbd_img_parent_read(struct rbd_obj_request *obj_request); 528static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
528static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 529static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1818 obj_request_done_set(obj_request); 1819 obj_request_done_set(obj_request);
1819} 1820}
1820 1821
1822static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1823{
1824 dout("%s: obj %p\n", __func__, obj_request);
1825
1826 if (obj_request_img_data_test(obj_request))
1827 rbd_osd_copyup_callback(obj_request);
1828 else
1829 obj_request_done_set(obj_request);
1830}
1831
1821static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, 1832static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1822 struct ceph_msg *msg) 1833 struct ceph_msg *msg)
1823{ 1834{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1866 rbd_osd_discard_callback(obj_request); 1877 rbd_osd_discard_callback(obj_request);
1867 break; 1878 break;
1868 case CEPH_OSD_OP_CALL: 1879 case CEPH_OSD_OP_CALL:
1880 rbd_osd_call_callback(obj_request);
1881 break;
1869 case CEPH_OSD_OP_NOTIFY_ACK: 1882 case CEPH_OSD_OP_NOTIFY_ACK:
1870 case CEPH_OSD_OP_WATCH: 1883 case CEPH_OSD_OP_WATCH:
1871 rbd_osd_trivial_callback(obj_request); 1884 rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
2530} 2543}
2531 2544
2532static void 2545static void
2533rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) 2546rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2534{ 2547{
2535 struct rbd_img_request *img_request; 2548 struct rbd_img_request *img_request;
2536 struct rbd_device *rbd_dev; 2549 struct rbd_device *rbd_dev;
2537 struct page **pages; 2550 struct page **pages;
2538 u32 page_count; 2551 u32 page_count;
2539 2552
2553 dout("%s: obj %p\n", __func__, obj_request);
2554
2540 rbd_assert(obj_request->type == OBJ_REQUEST_BIO || 2555 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2541 obj_request->type == OBJ_REQUEST_NODATA); 2556 obj_request->type == OBJ_REQUEST_NODATA);
2542 rbd_assert(obj_request_img_data_test(obj_request)); 2557 rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2563 if (!obj_request->result) 2578 if (!obj_request->result)
2564 obj_request->xferred = obj_request->length; 2579 obj_request->xferred = obj_request->length;
2565 2580
2566 /* Finish up with the normal image object callback */ 2581 obj_request_done_set(obj_request);
2567
2568 rbd_img_obj_callback(obj_request);
2569} 2582}
2570 2583
2571static void 2584static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2650 2663
2651 /* All set, send it off. */ 2664 /* All set, send it off. */
2652 2665
2653 orig_request->callback = rbd_img_obj_copyup_callback;
2654 osdc = &rbd_dev->rbd_client->client->osdc; 2666 osdc = &rbd_dev->rbd_client->client->osdc;
2655 img_result = rbd_obj_request_submit(osdc, orig_request); 2667 img_result = rbd_obj_request_submit(osdc, orig_request);
2656 if (!img_result) 2668 if (!img_result)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..954c0029fb3b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
 		return;
 	}
 
-	if (work_pending(&blkif->persistent_purge_work)) {
-		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+	if (work_busy(&blkif->persistent_purge_work)) {
+		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
 		return;
 	}
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..7a8a73f1fc04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) 179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
180 180
181static int blkfront_setup_indirect(struct blkfront_info *info); 181static int blkfront_setup_indirect(struct blkfront_info *info);
182static int blkfront_gather_backend_features(struct blkfront_info *info);
182 183
183static int get_id_from_freelist(struct blkfront_info *info) 184static int get_id_from_freelist(struct blkfront_info *info)
184{ 185{
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
1128 * Add the used indirect page back to the list of 1129 * Add the used indirect page back to the list of
1129 * available pages for indirect grefs. 1130 * available pages for indirect grefs.
1130 */ 1131 */
1131 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); 1132 if (!info->feature_persistent) {
1132 list_add(&indirect_page->lru, &info->indirect_pages); 1133 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
1134 list_add(&indirect_page->lru, &info->indirect_pages);
1135 }
1133 s->indirect_grants[i]->gref = GRANT_INVALID_REF; 1136 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1134 list_add_tail(&s->indirect_grants[i]->node, &info->grants); 1137 list_add_tail(&s->indirect_grants[i]->node, &info->grants);
1135 } 1138 }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
1519 info->shadow_free = info->ring.req_prod_pvt; 1522 info->shadow_free = info->ring.req_prod_pvt;
1520 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; 1523 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1521 1524
1522 rc = blkfront_setup_indirect(info); 1525 rc = blkfront_gather_backend_features(info);
1523 if (rc) { 1526 if (rc) {
1524 kfree(copy); 1527 kfree(copy);
1525 return rc; 1528 return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1720 1723
1721static int blkfront_setup_indirect(struct blkfront_info *info) 1724static int blkfront_setup_indirect(struct blkfront_info *info)
1722{ 1725{
1723 unsigned int indirect_segments, segs; 1726 unsigned int segs;
1724 int err, i; 1727 int err, i;
1725 1728
1726 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1729 if (info->max_indirect_segments == 0)
1727 "feature-max-indirect-segments", "%u", &indirect_segments,
1728 NULL);
1729 if (err) {
1730 info->max_indirect_segments = 0;
1731 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; 1730 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1732 } else { 1731 else
1733 info->max_indirect_segments = min(indirect_segments,
1734 xen_blkif_max_segments);
1735 segs = info->max_indirect_segments; 1732 segs = info->max_indirect_segments;
1736 }
1737 1733
1738 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); 1734 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
1739 if (err) 1735 if (err)
@@ -1797,6 +1793,68 @@ out_of_memory:
1797} 1793}
1798 1794
1799/* 1795/*
1796 * Gather all backend feature-*
1797 */
1798static int blkfront_gather_backend_features(struct blkfront_info *info)
1799{
1800 int err;
1801 int barrier, flush, discard, persistent;
1802 unsigned int indirect_segments;
1803
1804 info->feature_flush = 0;
1805
1806 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1807 "feature-barrier", "%d", &barrier,
1808 NULL);
1809
1810 /*
1811 * If there's no "feature-barrier" defined, then it means
1812 * we're dealing with a very old backend which writes
1813 * synchronously; nothing to do.
1814 *
1815 * If there are barriers, then we use flush.
1816 */
1817 if (!err && barrier)
1818 info->feature_flush = REQ_FLUSH | REQ_FUA;
1819 /*
1820 * And if there is "feature-flush-cache" use that above
1821 * barriers.
1822 */
1823 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1824 "feature-flush-cache", "%d", &flush,
1825 NULL);
1826
1827 if (!err && flush)
1828 info->feature_flush = REQ_FLUSH;
1829
1830 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1831 "feature-discard", "%d", &discard,
1832 NULL);
1833
1834 if (!err && discard)
1835 blkfront_setup_discard(info);
1836
1837 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1838 "feature-persistent", "%u", &persistent,
1839 NULL);
1840 if (err)
1841 info->feature_persistent = 0;
1842 else
1843 info->feature_persistent = persistent;
1844
1845 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1846 "feature-max-indirect-segments", "%u", &indirect_segments,
1847 NULL);
1848 if (err)
1849 info->max_indirect_segments = 0;
1850 else
1851 info->max_indirect_segments = min(indirect_segments,
1852 xen_blkif_max_segments);
1853
1854 return blkfront_setup_indirect(info);
1855}
1856
1857/*
1800 * Invoked when the backend is finally 'ready' (and has told produced 1858 * Invoked when the backend is finally 'ready' (and has told produced
1801 * the details about the physical device - #sectors, size, etc). 1859 * the details about the physical device - #sectors, size, etc).
1802 */ 1860 */
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
1807 unsigned int physical_sector_size; 1865 unsigned int physical_sector_size;
1808 unsigned int binfo; 1866 unsigned int binfo;
1809 int err; 1867 int err;
1810 int barrier, flush, discard, persistent;
1811 1868
1812 switch (info->connected) { 1869 switch (info->connected) {
1813 case BLKIF_STATE_CONNECTED: 1870 case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
1864 if (err != 1) 1921 if (err != 1)
1865 physical_sector_size = sector_size; 1922 physical_sector_size = sector_size;
1866 1923
1867 info->feature_flush = 0; 1924 err = blkfront_gather_backend_features(info);
1868
1869 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1870 "feature-barrier", "%d", &barrier,
1871 NULL);
1872
1873 /*
1874 * If there's no "feature-barrier" defined, then it means
1875 * we're dealing with a very old backend which writes
1876 * synchronously; nothing to do.
1877 *
1878 * If there are barriers, then we use flush.
1879 */
1880 if (!err && barrier)
1881 info->feature_flush = REQ_FLUSH | REQ_FUA;
1882 /*
1883 * And if there is "feature-flush-cache" use that above
1884 * barriers.
1885 */
1886 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1887 "feature-flush-cache", "%d", &flush,
1888 NULL);
1889
1890 if (!err && flush)
1891 info->feature_flush = REQ_FLUSH;
1892
1893 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1894 "feature-discard", "%d", &discard,
1895 NULL);
1896
1897 if (!err && discard)
1898 blkfront_setup_discard(info);
1899
1900 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1901 "feature-persistent", "%u", &persistent,
1902 NULL);
1903 if (err)
1904 info->feature_persistent = 0;
1905 else
1906 info->feature_persistent = persistent;
1907
1908 err = blkfront_setup_indirect(info);
1909 if (err) { 1925 if (err) {
1910 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", 1926 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1911 info->xbdev->otherend); 1927 info->xbdev->otherend);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..763301c7828c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
496 kfree(meta); 496 kfree(meta);
497} 497}
498 498
499static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) 499static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
500{ 500{
501 size_t num_pages; 501 size_t num_pages;
502 char pool_name[8];
503 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); 502 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
504 503
505 if (!meta) 504 if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
512 goto out_error; 511 goto out_error;
513 } 512 }
514 513
515 snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
516 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); 514 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
517 if (!meta->mem_pool) { 515 if (!meta->mem_pool) {
518 pr_err("Error creating memory pool\n"); 516 pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
1031 return -EINVAL; 1029 return -EINVAL;
1032 1030
1033 disksize = PAGE_ALIGN(disksize); 1031 disksize = PAGE_ALIGN(disksize);
1034 meta = zram_meta_alloc(zram->disk->first_minor, disksize); 1032 meta = zram_meta_alloc(zram->disk->disk_name, disksize);
1035 if (!meta) 1033 if (!meta)
1036 return -ENOMEM; 1034 return -ENOMEM;
1037 1035
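The zram hunk above stops formatting "zram%d" into an 8-byte stack buffer and passes the disk name through instead. One effect visible from the removed lines: a char[8] holds only seven characters plus the terminator, so the name truncates once the device number needs four digits. A quick demonstration (the device id is an arbitrary example):

#include <stdio.h>

int main(void)
{
	char pool_name[8];

	/* "zram999" (7 chars + NUL) still fits; "zram1000" does not. */
	snprintf(pool_name, sizeof(pool_name), "zram%d", 1000);
	printf("truncated pool name: %s\n", pool_name);	/* prints "zram100" */
	return 0;
}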
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
 
 	/* Read Verbose Config Version Info */
 	skb = btbcm_read_verbose_config(hdev);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-		get_unaligned_le16(skb->data + 5));
-	kfree_skb(skb);
+	if (!IS_ERR(skb)) {
+		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+			get_unaligned_le16(skb->data + 5));
+		kfree_skb(skb);
+	}
 
 	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+	if (IS_ERR(hwrng_fill)) {
 		pr_err("hwrng_fill thread creation failed");
 		hwrng_fill = NULL;
 	}
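The hw_random fix above widens the error check: kthread_run() reports failure through the ERR_PTR() encoding, so comparing only against ERR_PTR(-ENOMEM) misses every other error code, while IS_ERR() catches them all. A small user-space sketch of that convention — the real macros live in include/linux/err.h, and fake_kthread_run() is a made-up stand-in:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

static int dummy_task;

/* Stand-in for kthread_run(): may fail with any errno, not just -ENOMEM,
 * which is why callers should test IS_ERR() rather than one specific value. */
static void *fake_kthread_run(int fail_with)
{
	return fail_with ? ERR_PTR((long)-fail_with) : (void *)&dummy_task;
}

int main(void)
{
	void *task = fake_kthread_run(22 /* EINVAL */);

	if (IS_ERR(task))	/* catches any error, not only -ENOMEM */
		printf("thread creation failed: %ld\n", PTR_ERR(task));
	return 0;
}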
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 283f00a7f036..1082d4bb016a 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
 
 	device_initialize(&chip->dev);
 
-	chip->cdev.owner = chip->pdev->driver->owner;
 	cdev_init(&chip->cdev, &tpm_fops);
+	chip->cdev.owner = chip->pdev->driver->owner;
+	chip->cdev.kobj.parent = &chip->dev.kobj;
 
 	return chip;
 }
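The tpm-chip reorder above matters because cdev_init() begins by zeroing the cdev, so a field assigned beforehand (such as .owner) is wiped. A hedged user-space illustration of the same ordering pitfall; init_dev() here is a hypothetical stand-in, not the kernel's cdev_init():

#include <stdio.h>
#include <string.h>

struct dev { const char *owner; void (*ops)(void); };

static void noop(void) { }

/* Like cdev_init(), this zeroes the structure before filling it in. */
static void init_dev(struct dev *d, void (*ops)(void))
{
	memset(d, 0, sizeof(*d));
	d->ops = ops;
}

int main(void)
{
	struct dev d;

	d.owner = "set-too-early";	/* lost: init_dev() memsets it away */
	init_dev(&d, noop);
	printf("owner after early assignment: %s\n", d.owner ? d.owner : "(null)");

	init_dev(&d, noop);
	d.owner = "set-after-init";	/* survives, matching the fixed order */
	printf("owner after late assignment:  %s\n", d.owner);
	return 0;
}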
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 44f9d20c19ac..1267322595da 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
233 return -ENODEV; 233 return -ENODEV;
234 } 234 }
235 235
236 /* At least some versions of AMI BIOS have a bug that TPM2 table has
237 * zero address for the control area and therefore we must fail.
238 */
239 if (!buf->control_area_pa) {
240 dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
241 return -EINVAL;
242 }
243
236 if (buf->hdr.length < sizeof(struct acpi_tpm2)) { 244 if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
237 dev_err(dev, "TPM2 ACPI table has wrong size"); 245 dev_err(dev, "TPM2 ACPI table has wrong size");
238 return -EINVAL; 246 return -EINVAL;
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 152dcb3f7b5f..61566bcefa53 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -116,8 +116,10 @@ void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
116 h32mxclk->pmc = pmc; 116 h32mxclk->pmc = pmc;
117 117
118 clk = clk_register(NULL, &h32mxclk->hw); 118 clk = clk_register(NULL, &h32mxclk->hw);
119 if (!clk) 119 if (!clk) {
120 kfree(h32mxclk);
120 return; 121 return;
122 }
121 123
122 of_clk_add_provider(np, of_clk_src_simple_get, clk); 124 of_clk_add_provider(np, of_clk_src_simple_get, clk);
123} 125}
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index c2400456a044..27dfa965cfed 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -171,8 +171,10 @@ at91_clk_register_main_osc(struct at91_pmc *pmc,
171 irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); 171 irq_set_status_flags(osc->irq, IRQ_NOAUTOEN);
172 ret = request_irq(osc->irq, clk_main_osc_irq_handler, 172 ret = request_irq(osc->irq, clk_main_osc_irq_handler,
173 IRQF_TRIGGER_HIGH, name, osc); 173 IRQF_TRIGGER_HIGH, name, osc);
174 if (ret) 174 if (ret) {
175 kfree(osc);
175 return ERR_PTR(ret); 176 return ERR_PTR(ret);
177 }
176 178
177 if (bypass) 179 if (bypass)
178 pmc_write(pmc, AT91_CKGR_MOR, 180 pmc_write(pmc, AT91_CKGR_MOR,
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index f98eafe9b12d..5b3ded5205a2 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -165,12 +165,16 @@ at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq,
165 irq_set_status_flags(master->irq, IRQ_NOAUTOEN); 165 irq_set_status_flags(master->irq, IRQ_NOAUTOEN);
166 ret = request_irq(master->irq, clk_master_irq_handler, 166 ret = request_irq(master->irq, clk_master_irq_handler,
167 IRQF_TRIGGER_HIGH, "clk-master", master); 167 IRQF_TRIGGER_HIGH, "clk-master", master);
168 if (ret) 168 if (ret) {
169 kfree(master);
169 return ERR_PTR(ret); 170 return ERR_PTR(ret);
171 }
170 172
171 clk = clk_register(NULL, &master->hw); 173 clk = clk_register(NULL, &master->hw);
172 if (IS_ERR(clk)) 174 if (IS_ERR(clk)) {
175 free_irq(master->irq, master);
173 kfree(master); 176 kfree(master);
177 }
174 178
175 return clk; 179 return clk;
176} 180}
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index cbbe40377ad6..18b60f4895a6 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -346,12 +346,16 @@ at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name,
346 irq_set_status_flags(pll->irq, IRQ_NOAUTOEN); 346 irq_set_status_flags(pll->irq, IRQ_NOAUTOEN);
347 ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH, 347 ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH,
348 id ? "clk-pllb" : "clk-plla", pll); 348 id ? "clk-pllb" : "clk-plla", pll);
349 if (ret) 349 if (ret) {
350 kfree(pll);
350 return ERR_PTR(ret); 351 return ERR_PTR(ret);
352 }
351 353
352 clk = clk_register(NULL, &pll->hw); 354 clk = clk_register(NULL, &pll->hw);
353 if (IS_ERR(clk)) 355 if (IS_ERR(clk)) {
356 free_irq(pll->irq, pll);
354 kfree(pll); 357 kfree(pll);
358 }
355 359
356 return clk; 360 return clk;
357} 361}
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index a76d03fd577b..58008b3e8bc1 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -130,13 +130,17 @@ at91_clk_register_system(struct at91_pmc *pmc, const char *name,
130 irq_set_status_flags(sys->irq, IRQ_NOAUTOEN); 130 irq_set_status_flags(sys->irq, IRQ_NOAUTOEN);
131 ret = request_irq(sys->irq, clk_system_irq_handler, 131 ret = request_irq(sys->irq, clk_system_irq_handler,
132 IRQF_TRIGGER_HIGH, name, sys); 132 IRQF_TRIGGER_HIGH, name, sys);
133 if (ret) 133 if (ret) {
134 kfree(sys);
134 return ERR_PTR(ret); 135 return ERR_PTR(ret);
136 }
135 } 137 }
136 138
137 clk = clk_register(NULL, &sys->hw); 139 clk = clk_register(NULL, &sys->hw);
138 if (IS_ERR(clk)) 140 if (IS_ERR(clk)) {
141 free_irq(sys->irq, sys);
139 kfree(sys); 142 kfree(sys);
143 }
140 144
141 return clk; 145 return clk;
142} 146}
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index ae3263bc1476..30dd697b1668 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -118,12 +118,16 @@ at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq,
118 irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN); 118 irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN);
119 ret = request_irq(utmi->irq, clk_utmi_irq_handler, 119 ret = request_irq(utmi->irq, clk_utmi_irq_handler,
120 IRQF_TRIGGER_HIGH, "clk-utmi", utmi); 120 IRQF_TRIGGER_HIGH, "clk-utmi", utmi);
121 if (ret) 121 if (ret) {
122 kfree(utmi);
122 return ERR_PTR(ret); 123 return ERR_PTR(ret);
124 }
123 125
124 clk = clk_register(NULL, &utmi->hw); 126 clk = clk_register(NULL, &utmi->hw);
125 if (IS_ERR(clk)) 127 if (IS_ERR(clk)) {
128 free_irq(utmi->irq, utmi);
126 kfree(utmi); 129 kfree(utmi);
130 }
127 131
128 return clk; 132 return clk;
129} 133}
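The at91 clock hunks above (clk-h32mx through clk-utmi) all plug the same leaks: if request_irq() fails the freshly allocated descriptor is now freed before returning, and if the later clk_register() fails the IRQ is released before the kfree(). A short sketch of that unwind-in-reverse-acquisition-order idea, written in the kernel's common goto style; alloc/claim/register helpers here are illustrative, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

struct thing { int irq; };

static int claim_irq(int irq)      { return irq > 0 ? 0 : -1; }
static void release_irq(int irq)   { (void)irq; }
static int register_clk(struct thing *t) { (void)t; return -1; /* force failure */ }

static struct thing *setup(int irq)
{
	struct thing *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->irq = irq;

	if (claim_irq(t->irq))
		goto err_free;		/* nothing else acquired yet */

	if (register_clk(t))
		goto err_irq;		/* undo in reverse acquisition order */

	return t;

err_irq:
	release_irq(t->irq);
err_free:
	free(t);
	return NULL;
}

int main(void)
{
	printf("setup %s\n", setup(5) ? "succeeded" : "failed and unwound cleanly");
	return 0;
}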
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index e19c09cd9645..f630e1bbdcfe 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -222,10 +222,6 @@ void __init iproc_asiu_setup(struct device_node *node,
222 struct iproc_asiu_clk *asiu_clk; 222 struct iproc_asiu_clk *asiu_clk;
223 const char *clk_name; 223 const char *clk_name;
224 224
225 clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
226 if (WARN_ON(!clk_name))
227 goto err_clk_register;
228
229 ret = of_property_read_string_index(node, "clock-output-names", 225 ret = of_property_read_string_index(node, "clock-output-names",
230 i, &clk_name); 226 i, &clk_name);
231 if (WARN_ON(ret)) 227 if (WARN_ON(ret))
@@ -259,7 +255,7 @@ void __init iproc_asiu_setup(struct device_node *node,
259 255
260err_clk_register: 256err_clk_register:
261 for (i = 0; i < num_clks; i++) 257 for (i = 0; i < num_clks; i++)
262 kfree(asiu->clks[i].name); 258 clk_unregister(asiu->clk_data.clks[i]);
263 iounmap(asiu->gate_base); 259 iounmap(asiu->gate_base);
264 260
265err_iomap_gate: 261err_iomap_gate:
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 46fb84bc2674..2dda4e8295a9 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -366,7 +366,7 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
366 val = readl(pll->pll_base + ctrl->ndiv_int.offset); 366 val = readl(pll->pll_base + ctrl->ndiv_int.offset);
367 ndiv_int = (val >> ctrl->ndiv_int.shift) & 367 ndiv_int = (val >> ctrl->ndiv_int.shift) &
368 bit_mask(ctrl->ndiv_int.width); 368 bit_mask(ctrl->ndiv_int.width);
369 ndiv = ndiv_int << ctrl->ndiv_int.shift; 369 ndiv = (u64)ndiv_int << ctrl->ndiv_int.shift;
370 370
371 if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) { 371 if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
372 val = readl(pll->pll_base + ctrl->ndiv_frac.offset); 372 val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
@@ -374,7 +374,8 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
374 bit_mask(ctrl->ndiv_frac.width); 374 bit_mask(ctrl->ndiv_frac.width);
375 375
376 if (ndiv_frac != 0) 376 if (ndiv_frac != 0)
377 ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac; 377 ndiv = ((u64)ndiv_int << ctrl->ndiv_int.shift) |
378 ndiv_frac;
378 } 379 }
379 380
380 val = readl(pll->pll_base + ctrl->pdiv.offset); 381 val = readl(pll->pll_base + ctrl->pdiv.offset);
@@ -655,10 +656,6 @@ void __init iproc_pll_clk_setup(struct device_node *node,
655 memset(&init, 0, sizeof(init)); 656 memset(&init, 0, sizeof(init));
656 parent_name = node->name; 657 parent_name = node->name;
657 658
658 clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL);
659 if (WARN_ON(!clk_name))
660 goto err_clk_register;
661
662 ret = of_property_read_string_index(node, "clock-output-names", 659 ret = of_property_read_string_index(node, "clock-output-names",
663 i, &clk_name); 660 i, &clk_name);
664 if (WARN_ON(ret)) 661 if (WARN_ON(ret))
@@ -690,10 +687,8 @@ void __init iproc_pll_clk_setup(struct device_node *node,
690 return; 687 return;
691 688
692err_clk_register: 689err_clk_register:
693 for (i = 0; i < num_clks; i++) { 690 for (i = 0; i < num_clks; i++)
694 kfree(pll->clks[i].name);
695 clk_unregister(pll->clk_data.clks[i]); 691 clk_unregister(pll->clk_data.clks[i]);
696 }
697 692
698err_pll_register: 693err_pll_register:
699 if (pll->asiu_base) 694 if (pll->asiu_base)
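The iproc PLL hunks above cast ndiv_int to u64 before shifting: without the cast the shift is evaluated in 32-bit arithmetic and the high bits are lost before the result is ever widened into the 64-bit ndiv. A self-contained demonstration of the difference; the field value and shift amount are arbitrary examples, not the register layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ndiv_int = 0x80;	/* example field value */
	unsigned int shift = 28;	/* large enough to overflow 32 bits */

	/* Shift done in 32-bit arithmetic, then widened: high bits are lost. */
	uint64_t truncated = ndiv_int << shift;

	/* Operand widened first, as in the fixed driver code: value preserved. */
	uint64_t correct = (uint64_t)ndiv_int << shift;

	printf("32-bit shift then widen: 0x%llx\n", (unsigned long long)truncated);
	printf("widen then shift:        0x%llx\n", (unsigned long long)correct);
	return 0;
}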
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index b9b12a742970..3f6f7ad39490 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -268,7 +268,7 @@ static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary)
 	memcpy(table, stm32f42xx_gate_map, sizeof(table));
 
 	/* only bits set in table can be used as indices */
-	if (WARN_ON(secondary > 8 * sizeof(table) ||
+	if (WARN_ON(secondary >= BITS_PER_BYTE * sizeof(table) ||
 		    0 == (table[BIT_ULL_WORD(secondary)] &
 			  BIT_ULL_MASK(secondary))))
 		return -EINVAL;
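The stm32f4 hunk above tightens the index check from "> 8 * sizeof(table)" to ">= BITS_PER_BYTE * sizeof(table)": for a table of N bits the valid indices are 0..N-1, so an index equal to N must also be rejected. A quick arithmetic sketch, with sizes chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

int main(void)
{
	uint64_t table[2] = { 0 };	/* 128 bits: valid indices are 0..127 */
	unsigned int nbits = BITS_PER_BYTE * sizeof(table);
	unsigned int idx = 128;

	/* Old check: 128 > 128 is false, so the out-of-range index slips through. */
	printf("old check rejects %u: %s\n", idx, idx > nbits ? "yes" : "no");

	/* New check: 128 >= 128 is true, so it is correctly rejected. */
	printf("new check rejects %u: %s\n", idx, idx >= nbits ? "yes" : "no");
	return 0;
}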
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 4b9e04cdf7e8..8b6523d15fb8 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -700,6 +700,22 @@ static const struct mtk_composite peri_clks[] __initconst = {
700 MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1), 700 MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1),
701}; 701};
702 702
703static struct clk_onecell_data *mt8173_top_clk_data __initdata;
704static struct clk_onecell_data *mt8173_pll_clk_data __initdata;
705
706static void __init mtk_clk_enable_critical(void)
707{
708 if (!mt8173_top_clk_data || !mt8173_pll_clk_data)
709 return;
710
711 clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]);
712 clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]);
713 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]);
714 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
715 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]);
716 clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]);
717}
718
703static void __init mtk_topckgen_init(struct device_node *node) 719static void __init mtk_topckgen_init(struct device_node *node)
704{ 720{
705 struct clk_onecell_data *clk_data; 721 struct clk_onecell_data *clk_data;
@@ -712,19 +728,19 @@ static void __init mtk_topckgen_init(struct device_node *node)
712 return; 728 return;
713 } 729 }
714 730
715 clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); 731 mt8173_top_clk_data = clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
716 732
717 mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data); 733 mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
718 mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); 734 mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
719 mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, 735 mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
720 &mt8173_clk_lock, clk_data); 736 &mt8173_clk_lock, clk_data);
721 737
722 clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]);
723
724 r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); 738 r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
725 if (r) 739 if (r)
726 pr_err("%s(): could not register clock provider: %d\n", 740 pr_err("%s(): could not register clock provider: %d\n",
727 __func__, r); 741 __func__, r);
742
743 mtk_clk_enable_critical();
728} 744}
729CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init); 745CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init);
730 746
@@ -818,13 +834,13 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
818{ 834{
819 struct clk_onecell_data *clk_data; 835 struct clk_onecell_data *clk_data;
820 836
821 clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); 837 mt8173_pll_clk_data = clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
822 if (!clk_data) 838 if (!clk_data)
823 return; 839 return;
824 840
825 mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); 841 mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
826 842
827 clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]); 843 mtk_clk_enable_critical();
828} 844}
829CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys", 845CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys",
830 mtk_apmixedsys_init); 846 mtk_apmixedsys_init);
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..ac03ba49e9d1 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
 		    div_hp, bit, is_lp, flags) \
 	PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
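The pxa3xx macro fix above swaps the ternary arms: clock-enable bits numbered above 31 live in the second enable register, so picking CKENA for them toggled the wrong register. A tiny sketch of selecting the register and bit position for a wide bit number; the register names only loosely mirror the macro:

#include <stdio.h>
#include <stdint.h>

static uint32_t CKENA, CKENB;

/* Bits 0..31 live in CKENA, bits 32..63 in CKENB, as in the fixed macro. */
static uint32_t *cken_reg(unsigned int bit)
{
	return bit > 31 ? &CKENB : &CKENA;
}

static void cken_set(unsigned int bit)
{
	*cken_reg(bit) |= 1u << (bit & 31);
}

int main(void)
{
	cken_set(3);	/* lands in CKENA */
	cken_set(40);	/* lands in CKENB, which the old macro got wrong */
	printf("CKENA=%08x CKENB=%08x\n", (unsigned)CKENA, (unsigned)CKENB);
	return 0;
}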
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b95d17fbb8d7..92936f0912d2 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	struct freq_tbl f = *rcg->freq_tbl;
 	const struct frac_entry *frac = frac_table_pixel;
-	unsigned long request, src_rate;
+	unsigned long request;
 	int delta = 100000;
 	u32 mask = BIT(rcg->hid_width) - 1;
 	u32 hid_div;
-	int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
-	struct clk *parent = clk_get_parent_by_index(hw->clk, index);
 
 	for (; frac->num; frac++) {
 		request = (rate * frac->den) / frac->num;
 
-		src_rate = __clk_round_rate(parent, request);
-		if ((src_rate < (request - delta)) ||
-		    (src_rate > (request + delta)))
+		if ((parent_rate < (request - delta)) ||
+		    (parent_rate > (request + delta)))
 			continue;
 
 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
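The clk-rcg2 hunk above drops the extra __clk_round_rate() lookup and compares the caller-supplied parent_rate against each fractional candidate within a fixed tolerance. Stripped of the register access, the search is a small loop; here is a stand-alone sketch of that tolerance match — the table entries and delta are illustrative, not the driver's frac_table_pixel:

#include <stdio.h>

struct frac_entry { int num; int den; };

/* Return the matching entry, or NULL if no num/den brings the parent rate
 * within +/- delta of the requested rate. */
static const struct frac_entry *find_frac(const struct frac_entry *frac,
					  unsigned long rate,
					  unsigned long parent_rate,
					  unsigned long delta)
{
	for (; frac->num; frac++) {
		unsigned long request = (rate * frac->den) / frac->num;

		if (parent_rate < request - delta || parent_rate > request + delta)
			continue;
		return frac;
	}
	return NULL;
}

int main(void)
{
	static const struct frac_entry table[] = { {1, 2}, {2, 3}, {0, 0} };
	const struct frac_entry *f = find_frac(table, 75000000, 150000000, 100000);

	if (f)
		printf("matched %d/%d\n", f->num, f->den);
	else
		printf("no match\n");
	return 0;
}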
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index bdfb4421c643..f271c350ef94 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index dffd4ce6c8b5..58d678b5b40a 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 1afc18c4effc..1a722e99e76e 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 1b9b65bca51e..5ebddc528145 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 628b6d5ed3d9..157fe099ea6a 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.linux@gmail.com> 3 * Viresh Kumar <vireshk@kernel.org>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 931737677dfa..9834944f08b1 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
2 * Clock framework definitions for SPEAr platform 2 * Clock framework definitions for SPEAr platform
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 4daa5977793a..222ce108b41a 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine clock framework source file 4 * SPEAr1310 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 5a5c6648308d..973c9d3fbcf8 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine clock framework source file 4 * SPEAr1340 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index bb5f387774e2..404a55edd613 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr3xx machines clock framework source file 2 * SPEAr3xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 4f649c9cb094..231061fa73a4 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr6xx machines clock framework source file 2 * SPEAr6xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 657ca14ba709..8dd8cce27361 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -190,7 +190,7 @@ static struct clk *clk_register_flexgen(const char *name,
190 190
191 init.name = name; 191 init.name = name;
192 init.ops = &flexgen_ops; 192 init.ops = &flexgen_ops;
193 init.flags = CLK_IS_BASIC | flexgen_flags; 193 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
194 init.parent_names = parent_names; 194 init.parent_names = parent_names;
195 init.num_parents = num_parents; 195 init.num_parents = num_parents;
196 196
@@ -303,6 +303,8 @@ static void __init st_of_flexgen_setup(struct device_node *np)
303 if (!rlock) 303 if (!rlock)
304 goto err; 304 goto err;
305 305
306 spin_lock_init(rlock);
307
306 for (i = 0; i < clk_data->clk_num; i++) { 308 for (i = 0; i < clk_data->clk_num; i++) {
307 struct clk *clk; 309 struct clk *clk;
308 const char *clk_name; 310 const char *clk_name;
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index e94197f04b0b..d9eb2e1d8471 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
340 CLKGEN_FIELD(0x30c, 0xf, 20), 340 CLKGEN_FIELD(0x30c, 0xf, 20),
341 CLKGEN_FIELD(0x310, 0xf, 20) }, 341 CLKGEN_FIELD(0x310, 0xf, 20) },
342 .lockstatus_present = true, 342 .lockstatus_present = true,
343 .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24), 343 .lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
344 .powerup_polarity = 1, 344 .powerup_polarity = 1,
345 .standby_polarity = 1, 345 .standby_polarity = 1,
346 .pll_ops = &st_quadfs_pll_c32_ops, 346 .pll_ops = &st_quadfs_pll_c32_ops,
@@ -489,7 +489,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw)
489 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw); 489 struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
490 u32 npda = CLKGEN_READ(pll, npda); 490 u32 npda = CLKGEN_READ(pll, npda);
491 491
492 return !!npda; 492 return pll->data->powerup_polarity ? !npda : !!npda;
493} 493}
494 494
495static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, 495static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
@@ -635,7 +635,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
635 635
636 init.name = name; 636 init.name = name;
637 init.ops = quadfs->pll_ops; 637 init.ops = quadfs->pll_ops;
638 init.flags = CLK_IS_BASIC; 638 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
639 init.parent_names = &parent_name; 639 init.parent_names = &parent_name;
640 init.num_parents = 1; 640 init.num_parents = 1;
641 641
@@ -774,7 +774,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw)
774 if (fs->lock) 774 if (fs->lock)
775 spin_lock_irqsave(fs->lock, flags); 775 spin_lock_irqsave(fs->lock, flags);
776 776
777 CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity); 777 CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
778 778
779 if (fs->lock) 779 if (fs->lock)
780 spin_unlock_irqrestore(fs->lock, flags); 780 spin_unlock_irqrestore(fs->lock, flags);
@@ -1082,10 +1082,6 @@ static const struct of_device_id quadfs_of_match[] = {
1082 .compatible = "st,stih407-quadfs660-D", 1082 .compatible = "st,stih407-quadfs660-D",
1083 .data = &st_fs660c32_D_407 1083 .data = &st_fs660c32_D_407
1084 }, 1084 },
1085 {
1086 .compatible = "st,stih407-quadfs660-D",
1087 .data = (void *)&st_fs660c32_D_407
1088 },
1089 {} 1085 {}
1090}; 1086};
1091 1087
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 4fbe6e099587..717c4a91a17b 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -237,7 +237,7 @@ static struct clk *clk_register_genamux(const char *name,
237 237
238 init.name = name; 238 init.name = name;
239 init.ops = &clkgena_divmux_ops; 239 init.ops = &clkgena_divmux_ops;
240 init.flags = CLK_IS_BASIC; 240 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
241 init.parent_names = parent_names; 241 init.parent_names = parent_names;
242 init.num_parents = num_parents; 242 init.num_parents = num_parents;
243 243
@@ -513,7 +513,8 @@ static void __init st_of_clkgena_prediv_setup(struct device_node *np)
513 0, &clk_name)) 513 0, &clk_name))
514 return; 514 return;
515 515
516 clk = clk_register_divider_table(NULL, clk_name, parent_name, 0, 516 clk = clk_register_divider_table(NULL, clk_name, parent_name,
517 CLK_GET_RATE_NOCACHE,
517 reg + data->offset, data->shift, 1, 518 reg + data->offset, data->shift, 1,
518 0, data->table, NULL); 519 0, data->table, NULL);
519 if (IS_ERR(clk)) 520 if (IS_ERR(clk))
@@ -582,7 +583,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
582}; 583};
583static struct clkgen_mux_data stih407_a9_mux_data = { 584static struct clkgen_mux_data stih407_a9_mux_data = {
584 .offset = 0x1a4, 585 .offset = 0x1a4,
585 .shift = 1, 586 .shift = 0,
586 .width = 2, 587 .width = 2,
587}; 588};
588 589
@@ -786,7 +787,8 @@ static void __init st_of_clkgen_vcc_setup(struct device_node *np)
786 &mux->hw, &clk_mux_ops, 787 &mux->hw, &clk_mux_ops,
787 &div->hw, &clk_divider_ops, 788 &div->hw, &clk_divider_ops,
788 &gate->hw, &clk_gate_ops, 789 &gate->hw, &clk_gate_ops,
789 data->clk_flags); 790 data->clk_flags |
791 CLK_GET_RATE_NOCACHE);
790 if (IS_ERR(clk)) { 792 if (IS_ERR(clk)) {
791 kfree(gate); 793 kfree(gate);
792 kfree(div); 794 kfree(div);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 106532207213..72d1c27eaffa 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -406,7 +406,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
406 init.name = clk_name; 406 init.name = clk_name;
407 init.ops = pll_data->ops; 407 init.ops = pll_data->ops;
408 408
409 init.flags = CLK_IS_BASIC; 409 init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
410 init.parent_names = &parent_name; 410 init.parent_names = &parent_name;
411 init.num_parents = 1; 411 init.num_parents = 1;
412 412
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 9a82f17d2d73..abf7b37faf73 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -1391,6 +1391,7 @@ static void __init sun6i_init_clocks(struct device_node *node)
1391CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks); 1391CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
1392CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks); 1392CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
1393CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks); 1393CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
1394CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
1394 1395
1395static void __init sun9i_init_clocks(struct device_node *node) 1396static void __init sun9i_init_clocks(struct device_node *node)
1396{ 1397{
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..c96de14036a0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
661{ 661{
662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
663 663
664 if (!ch->cs_enabled)
665 return;
666
664 sh_cmt_stop(ch, FLAG_CLOCKSOURCE); 667 sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
665 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); 668 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
666} 669}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
669{ 672{
670 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 673 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
671 674
675 if (!ch->cs_enabled)
676 return;
677
672 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); 678 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
673 sh_cmt_start(ch, FLAG_CLOCKSOURCE); 679 sh_cmt_start(ch, FLAG_CLOCKSOURCE);
674} 680}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 879c78423546..86c7eb66bdfb 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
462 BUG_ON(!imxtm->base); 462 BUG_ON(!imxtm->base);
463 463
464 imxtm->type = type; 464 imxtm->type = type;
465 imxtm->irq = irq;
465 466
466 _mxc_timer_init(imxtm); 467 _mxc_timer_init(imxtm);
467} 468}
@@ -529,6 +530,7 @@ static void __init imx6dl_timer_init_dt(struct device_node *np)
529 530
530CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt); 531CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
531CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt); 532CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
533CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
532CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt); 534CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
533CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt); 535CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
534CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt); 536CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b612411655f9..7a3c30c4336f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
169} 169}
170EXPORT_SYMBOL_GPL(get_governor_parent_kobj); 170EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
171 171
172struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
173{
174 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
175
176 return policy && !policy_is_inactive(policy) ?
177 policy->freq_table : NULL;
178}
179EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
180
172static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) 181static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
173{ 182{
174 u64 idle_time; 183 u64 idle_time;
@@ -993,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
993 int ret = 0; 1002 int ret = 0;
994 1003
995 /* Some related CPUs might not be present (physically hotplugged) */ 1004 /* Some related CPUs might not be present (physically hotplugged) */
996 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1005 for_each_cpu(j, policy->real_cpus) {
997 if (j == policy->kobj_cpu) 1006 if (j == policy->kobj_cpu)
998 continue; 1007 continue;
999 1008
@@ -1010,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1010 unsigned int j; 1019 unsigned int j;
1011 1020
1012 /* Some related CPUs might not be present (physically hotplugged) */ 1021 /* Some related CPUs might not be present (physically hotplugged) */
1013 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1022 for_each_cpu(j, policy->real_cpus) {
1014 if (j == policy->kobj_cpu) 1023 if (j == policy->kobj_cpu)
1015 continue; 1024 continue;
1016 1025
@@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1132 1141
1133 down_write(&policy->rwsem); 1142 down_write(&policy->rwsem);
1134 policy->cpu = cpu; 1143 policy->cpu = cpu;
1144 policy->governor = NULL;
1135 up_write(&policy->rwsem); 1145 up_write(&policy->rwsem);
1136 } 1146 }
1137 1147
@@ -1153,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1153 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1154 goto err_free_cpumask; 1164 goto err_free_cpumask;
1155 1165
1166 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1167 goto err_free_rcpumask;
1168
1156 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 1169 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1157 "cpufreq"); 1170 "cpufreq");
1158 if (ret) { 1171 if (ret) {
1159 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1172 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1160 goto err_free_rcpumask; 1173 goto err_free_real_cpus;
1161 } 1174 }
1162 1175
1163 INIT_LIST_HEAD(&policy->policy_list); 1176 INIT_LIST_HEAD(&policy->policy_list);
@@ -1174,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1174 1187
1175 return policy; 1188 return policy;
1176 1189
1190err_free_real_cpus:
1191 free_cpumask_var(policy->real_cpus);
1177err_free_rcpumask: 1192err_free_rcpumask:
1178 free_cpumask_var(policy->related_cpus); 1193 free_cpumask_var(policy->related_cpus);
1179err_free_cpumask: 1194err_free_cpumask:
@@ -1224,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1224 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1239 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1225 1240
1226 cpufreq_policy_put_kobj(policy, notify); 1241 cpufreq_policy_put_kobj(policy, notify);
1242 free_cpumask_var(policy->real_cpus);
1227 free_cpumask_var(policy->related_cpus); 1243 free_cpumask_var(policy->related_cpus);
1228 free_cpumask_var(policy->cpus); 1244 free_cpumask_var(policy->cpus);
1229 kfree(policy); 1245 kfree(policy);
@@ -1248,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1248 1264
1249 pr_debug("adding CPU %u\n", cpu); 1265 pr_debug("adding CPU %u\n", cpu);
1250 1266
1251 /* 1267 if (cpu_is_offline(cpu)) {
1252 * Only possible if 'cpu' wasn't physically present earlier and we are 1268 /*
1253 * here from subsys_interface add callback. A hotplug notifier will 1269 * Only possible if we are here from the subsys_interface add
1254 * follow and we will handle it like logical CPU hotplug then. For now, 1270 * callback. A hotplug notifier will follow and we will handle
1255 * just create the sysfs link. 1271 * it as CPU online then. For now, just create the sysfs link,
1256 */ 1272 * unless there is no policy or the link is already present.
1257 if (cpu_is_offline(cpu)) 1273 */
1258 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); 1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1277 }
1259 1278
1260 if (!down_read_trylock(&cpufreq_rwsem)) 1279 if (!down_read_trylock(&cpufreq_rwsem))
1261 return 0; 1280 return 0;
@@ -1297,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1297 /* related cpus should atleast have policy->cpus */ 1316 /* related cpus should atleast have policy->cpus */
1298 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1299 1318
1319 /* Remember which CPUs have been present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1322
1300 /* 1323 /*
1301 * affected cpus must always be the one, which are online. We aren't 1324 * affected cpus must always be the one, which are online. We aren't
1302 * managing offline cpus here. 1325 * managing offline cpus here.
@@ -1410,8 +1433,7 @@ nomem_out:
1410 return ret; 1433 return ret;
1411} 1434}
1412 1435
1413static int __cpufreq_remove_dev_prepare(struct device *dev, 1436static int __cpufreq_remove_dev_prepare(struct device *dev)
1414 struct subsys_interface *sif)
1415{ 1437{
1416 unsigned int cpu = dev->id; 1438 unsigned int cpu = dev->id;
1417 int ret = 0; 1439 int ret = 0;
@@ -1427,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1427 1449
1428 if (has_target()) { 1450 if (has_target()) {
1429 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1430 if (ret) { 1452 if (ret)
1431 pr_err("%s: Failed to stop governor\n", __func__); 1453 pr_err("%s: Failed to stop governor\n", __func__);
1432 return ret;
1433 }
1434 } 1454 }
1435 1455
1436 down_write(&policy->rwsem); 1456 down_write(&policy->rwsem);
@@ -1463,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1463 return ret; 1483 return ret;
1464} 1484}
1465 1485
1466static int __cpufreq_remove_dev_finish(struct device *dev, 1486static int __cpufreq_remove_dev_finish(struct device *dev)
1467 struct subsys_interface *sif)
1468{ 1487{
1469 unsigned int cpu = dev->id; 1488 unsigned int cpu = dev->id;
1470 int ret; 1489 int ret;
@@ -1482,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1482 /* If cpu is last user of policy, free policy */ 1501 /* If cpu is last user of policy, free policy */
1483 if (has_target()) { 1502 if (has_target()) {
1484 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1503 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1485 if (ret) { 1504 if (ret)
1486 pr_err("%s: Failed to exit governor\n", __func__); 1505 pr_err("%s: Failed to exit governor\n", __func__);
1487 return ret;
1488 }
1489 } 1506 }
1490 1507
1491 /* 1508 /*
@@ -1496,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1496 if (cpufreq_driver->exit) 1513 if (cpufreq_driver->exit)
1497 cpufreq_driver->exit(policy); 1514 cpufreq_driver->exit(policy);
1498 1515
1499 /* Free the policy only if the driver is getting removed. */
1500 if (sif)
1501 cpufreq_policy_free(policy, true);
1502
1503 return 0; 1516 return 0;
1504} 1517}
1505 1518
@@ -1511,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1511static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1524static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1512{ 1525{
1513 unsigned int cpu = dev->id; 1526 unsigned int cpu = dev->id;
1514 int ret; 1527 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1515
1516 /*
1517 * Only possible if 'cpu' is getting physically removed now. A hotplug
1518 * notifier should have already been called and we just need to remove
1519 * link or free policy here.
1520 */
1521 if (cpu_is_offline(cpu)) {
1522 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1523 struct cpumask mask;
1524 1528
1525 if (!policy) 1529 if (!policy)
1526 return 0; 1530 return 0;
1527 1531
1528 cpumask_copy(&mask, policy->related_cpus); 1532 if (cpu_online(cpu)) {
1529 cpumask_clear_cpu(cpu, &mask); 1533 __cpufreq_remove_dev_prepare(dev);
1534 __cpufreq_remove_dev_finish(dev);
1535 }
1530 1536
1531 /* 1537 cpumask_clear_cpu(cpu, policy->real_cpus);
1532 * Free policy only if all policy->related_cpus are removed
1533 * physically.
1534 */
1535 if (cpumask_intersects(&mask, cpu_present_mask)) {
1536 remove_cpu_dev_symlink(policy, cpu);
1537 return 0;
1538 }
1539 1538
1539 if (cpumask_empty(policy->real_cpus)) {
1540 cpufreq_policy_free(policy, true); 1540 cpufreq_policy_free(policy, true);
1541 return 0; 1541 return 0;
1542 } 1542 }
1543 1543
1544 ret = __cpufreq_remove_dev_prepare(dev, sif); 1544 if (cpu != policy->kobj_cpu) {
1545 remove_cpu_dev_symlink(policy, cpu);
1546 } else {
1547 /*
1548 * The CPU owning the policy object is going away. Move it to
1549 * another suitable CPU.
1550 */
1551 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1552 struct device *new_dev = get_cpu_device(new_cpu);
1545 1553
1546 if (!ret) 1554 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
1547 ret = __cpufreq_remove_dev_finish(dev, sif);
1548 1555
1549 return ret; 1556 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1557 policy->kobj_cpu = new_cpu;
1558 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1559 }
1560
1561 return 0;
1550} 1562}
1551 1563
1552static void handle_update(struct work_struct *work) 1564static void handle_update(struct work_struct *work)
@@ -2385,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
2385 break; 2397 break;
2386 2398
2387 case CPU_DOWN_PREPARE: 2399 case CPU_DOWN_PREPARE:
2388 __cpufreq_remove_dev_prepare(dev, NULL); 2400 __cpufreq_remove_dev_prepare(dev);
2389 break; 2401 break;
2390 2402
2391 case CPU_POST_DEAD: 2403 case CPU_POST_DEAD:
2392 __cpufreq_remove_dev_finish(dev, NULL); 2404 __cpufreq_remove_dev_finish(dev);
2393 break; 2405 break;
2394 2406
2395 case CPU_DOWN_FAILED: 2407 case CPU_DOWN_FAILED:
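The rewritten cpufreq_remove_dev() above boils down to a small ownership pattern: remember in a mask which CPUs were present when the policy was created, drop each CPU from that mask as it is physically removed, free the policy only once the mask is empty, and re-home the sysfs kobject when the CPU that owns it is the one going away. A minimal user-space sketch of that bookkeeping (plain C, with a bitmask standing in for cpumask_t; the names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for cpumask_t and struct cpufreq_policy. */
struct policy {
	unsigned long real_cpus;   /* bit i set => CPU i was present at creation */
	int kobj_cpu;              /* CPU whose device currently owns the kobject */
};

/* Mirror of the remove path: clear the CPU, free on last user,
 * otherwise move ownership if the owner is leaving. */
static void remove_dev(struct policy **pp, int cpu)
{
	struct policy *p = *pp;

	p->real_cpus &= ~(1UL << cpu);

	if (!p->real_cpus) {
		printf("CPU%d was the last user: freeing policy\n", cpu);
		free(p);
		*pp = NULL;
		return;
	}

	if (cpu == p->kobj_cpu) {
		int new_cpu = __builtin_ctzl(p->real_cpus); /* first remaining CPU */
		printf("moving policy object from CPU%d to CPU%d\n", cpu, new_cpu);
		p->kobj_cpu = new_cpu;
	} else {
		printf("CPU%d: just dropping its sysfs link\n", cpu);
	}
}

int main(void)
{
	struct policy *p = calloc(1, sizeof(*p));

	p->real_cpus = 0xf;	/* CPUs 0-3 share the policy */
	p->kobj_cpu = 0;

	remove_dev(&p, 2);	/* non-owner: only the symlink goes */
	remove_dev(&p, 0);	/* owner leaves: kobject moves to CPU1 */
	remove_dev(&p, 1);
	remove_dev(&p, 3);	/* last user: policy is freed */
	return 0;
}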
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd3a978..fa3dd840a837 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
180 ret = exynos5250_cpufreq_init(exynos_info); 180 ret = exynos5250_cpufreq_init(exynos_info);
181 } else { 181 } else {
182 pr_err("%s: Unknown SoC type\n", __func__); 182 pr_err("%s: Unknown SoC type\n", __func__);
183 return -ENODEV; 183 ret = -ENODEV;
184 } 184 }
185 185
186 if (ret) 186 if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
188 188
189 if (exynos_info->set_freq == NULL) { 189 if (exynos_info->set_freq == NULL) {
190 dev_err(&pdev->dev, "No set_freq function (ERR)\n"); 190 dev_err(&pdev->dev, "No set_freq function (ERR)\n");
191 ret = -EINVAL;
191 goto err_vdd_arm; 192 goto err_vdd_arm;
192 } 193 }
193 194
194 arm_regulator = regulator_get(NULL, "vdd_arm"); 195 arm_regulator = regulator_get(NULL, "vdd_arm");
195 if (IS_ERR(arm_regulator)) { 196 if (IS_ERR(arm_regulator)) {
196 dev_err(&pdev->dev, "failed to get resource vdd_arm\n"); 197 dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
198 ret = -EINVAL;
197 goto err_vdd_arm; 199 goto err_vdd_arm;
198 } 200 }
199 201
@@ -225,7 +227,7 @@ err_cpufreq_reg:
225 regulator_put(arm_regulator); 227 regulator_put(arm_regulator);
226err_vdd_arm: 228err_vdd_arm:
227 kfree(exynos_info); 229 kfree(exynos_info);
228 return -EINVAL; 230 return ret;
229} 231}
230 232
231static struct platform_driver exynos_cpufreq_platdrv = { 233static struct platform_driver exynos_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index df14766a8e06..dfbbf981ed56 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
297} 297}
298EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show); 298EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
299 299
300struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
301
302struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
303{
304 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
305 return policy ? policy->freq_table : NULL;
306}
307EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
308
309MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); 300MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
310MODULE_DESCRIPTION("CPUfreq frequency table helpers"); 301MODULE_DESCRIPTION("CPUfreq frequency table helpers");
311MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..fcb929ec5304 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
681 .get_max = core_get_max_pstate, 681 .get_max = core_get_max_pstate,
682 .get_min = core_get_min_pstate, 682 .get_min = core_get_min_pstate,
683 .get_turbo = knl_get_turbo_pstate, 683 .get_turbo = knl_get_turbo_pstate,
684 .get_scaling = core_get_scaling,
684 .set = core_set_pstate, 685 .set = core_set_pstate,
685 }, 686 },
686}; 687};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index fc897babab55..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * The 2E revision of the loongson processor does not support this feature. 4 * The 2E revision of the loongson processor does not support this feature.
5 * 5 *
6 * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology 6 * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
7 * Author: Yanhua, yanh@lemote.com 7 * Author: Yanhua, yanh@lemote.com
8 * 8 *
9 * This file is subject to the terms and conditions of the GNU General Public 9 * This file is subject to the terms and conditions of the GNU General Public
@@ -20,7 +20,7 @@
20#include <asm/clock.h> 20#include <asm/clock.h>
21#include <asm/idle.h> 21#include <asm/idle.h>
22 22
23#include <asm/mach-loongson/loongson.h> 23#include <asm/mach-loongson64/loongson.h>
24 24
25static uint nowait; 25static uint nowait;
26 26
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index e8e2775c3821..48b7228563ad 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
112static void enter_freeze_proper(struct cpuidle_driver *drv, 112static void enter_freeze_proper(struct cpuidle_driver *drv,
113 struct cpuidle_device *dev, int index) 113 struct cpuidle_device *dev, int index)
114{ 114{
115 tick_freeze(); 115 /*
116 * trace_suspend_resume() called by tick_freeze() for the last CPU
117 * executing it contains RCU usage regarded as invalid in the idle
118 * context, so tell RCU about that.
119 */
120 RCU_NONIDLE(tick_freeze());
116 /* 121 /*
117 * The state used here cannot be a "coupled" one, because the "coupled" 122 * The state used here cannot be a "coupled" one, because the "coupled"
118 * cpuidle mechanism enables interrupts and doing that with timekeeping 123 * cpuidle mechanism enables interrupts and doing that with timekeeping
@@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
122 WARN_ON(!irqs_disabled()); 127 WARN_ON(!irqs_disabled());
123 /* 128 /*
124 * timekeeping_resume() that will be called by tick_unfreeze() for the 129 * timekeeping_resume() that will be called by tick_unfreeze() for the
125 * last CPU executing it calls functions containing RCU read-side 130 * first CPU executing it calls functions containing RCU read-side
126 * critical sections, so tell RCU about that. 131 * critical sections, so tell RCU about that.
127 */ 132 */
128 RCU_NONIDLE(tick_unfreeze()); 133 RCU_NONIDLE(tick_unfreeze());
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e8099969..f9c78751989e 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
909 state->buflen_1; 909 state->buflen_1;
910 u32 *sh_desc = ctx->sh_desc_fin, *desc; 910 u32 *sh_desc = ctx->sh_desc_fin, *desc;
911 dma_addr_t ptr = ctx->sh_desc_fin_dma; 911 dma_addr_t ptr = ctx->sh_desc_fin_dma;
912 int sec4_sg_bytes; 912 int sec4_sg_bytes, sec4_sg_src_index;
913 int digestsize = crypto_ahash_digestsize(ahash); 913 int digestsize = crypto_ahash_digestsize(ahash);
914 struct ahash_edesc *edesc; 914 struct ahash_edesc *edesc;
915 int ret = 0; 915 int ret = 0;
916 int sh_len; 916 int sh_len;
917 917
918 sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); 918 sec4_sg_src_index = 1 + (buflen ? 1 : 0);
919 sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
919 920
920 /* allocate space for base edesc and hw desc commands, link tables */ 921 /* allocate space for base edesc and hw desc commands, link tables */
921 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN + 922 edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
942 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, 943 state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
943 buf, state->buf_dma, buflen, 944 buf, state->buf_dma, buflen,
944 last_buflen); 945 last_buflen);
945 (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN; 946 (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
946 947
947 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, 948 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
948 sec4_sg_bytes, DMA_TO_DEVICE); 949 sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
905 crypt->mode |= NPE_OP_NOT_IN_PLACE; 905 crypt->mode |= NPE_OP_NOT_IN_PLACE;
906 /* This was never tested by Intel 906 /* This was never tested by Intel
907 * for more than one dst buffer, I think. */ 907 * for more than one dst buffer, I think. */
908 BUG_ON(req->dst->length < nbytes);
909 req_ctx->dst = NULL; 908 req_ctx->dst = NULL;
910 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, 909 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
911 flags, DMA_FROM_DEVICE)) 910 flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 67f80813a06f..e4311ce0cd78 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -494,8 +494,9 @@ out:
494static int ccm4309_aes_nx_encrypt(struct aead_request *req) 494static int ccm4309_aes_nx_encrypt(struct aead_request *req)
495{ 495{
496 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 496 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
497 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
497 struct blkcipher_desc desc; 498 struct blkcipher_desc desc;
498 u8 *iv = nx_ctx->priv.ccm.iv; 499 u8 *iv = rctx->iv;
499 500
500 iv[0] = 3; 501 iv[0] = 3;
501 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); 502 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
525static int ccm4309_aes_nx_decrypt(struct aead_request *req) 526static int ccm4309_aes_nx_decrypt(struct aead_request *req)
526{ 527{
527 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 528 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
529 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
528 struct blkcipher_desc desc; 530 struct blkcipher_desc desc;
529 u8 *iv = nx_ctx->priv.ccm.iv; 531 u8 *iv = rctx->iv;
530 532
531 iv[0] = 3; 533 iv[0] = 3;
532 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); 534 memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 2617cd4d54dd..dd7e9f3f5b6b 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
72 if (key_len < CTR_RFC3686_NONCE_SIZE) 72 if (key_len < CTR_RFC3686_NONCE_SIZE)
73 return -EINVAL; 73 return -EINVAL;
74 74
75 memcpy(nx_ctx->priv.ctr.iv, 75 memcpy(nx_ctx->priv.ctr.nonce,
76 in_key + key_len - CTR_RFC3686_NONCE_SIZE, 76 in_key + key_len - CTR_RFC3686_NONCE_SIZE,
77 CTR_RFC3686_NONCE_SIZE); 77 CTR_RFC3686_NONCE_SIZE);
78 78
@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
131 unsigned int nbytes) 131 unsigned int nbytes)
132{ 132{
133 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); 133 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
134 u8 *iv = nx_ctx->priv.ctr.iv; 134 u8 iv[16];
135 135
136 memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
136 memcpy(iv + CTR_RFC3686_NONCE_SIZE, 137 memcpy(iv + CTR_RFC3686_NONCE_SIZE,
137 desc->info, CTR_RFC3686_IV_SIZE); 138 desc->info, CTR_RFC3686_IV_SIZE);
138 iv[12] = iv[13] = iv[14] = 0; 139 iv[12] = iv[13] = iv[14] = 0;
139 iv[15] = 1; 140 iv[15] = 1;
140 141
141 desc->info = nx_ctx->priv.ctr.iv; 142 desc->info = iv;
142 143
143 return ctr_aes_nx_crypt(desc, dst, src, nbytes); 144 return ctr_aes_nx_crypt(desc, dst, src, nbytes);
144} 145}
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 08ac6d48688c..92c993f08213 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -317,6 +317,7 @@ out:
317static int gcm_aes_nx_crypt(struct aead_request *req, int enc) 317static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
318{ 318{
319 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 319 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
320 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
320 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 321 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
321 struct blkcipher_desc desc; 322 struct blkcipher_desc desc;
322 unsigned int nbytes = req->cryptlen; 323 unsigned int nbytes = req->cryptlen;
@@ -326,7 +327,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
326 327
327 spin_lock_irqsave(&nx_ctx->lock, irq_flags); 328 spin_lock_irqsave(&nx_ctx->lock, irq_flags);
328 329
329 desc.info = nx_ctx->priv.gcm.iv; 330 desc.info = rctx->iv;
330 /* initialize the counter */ 331 /* initialize the counter */
331 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1; 332 *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
332 333
@@ -424,8 +425,8 @@ out:
424 425
425static int gcm_aes_nx_encrypt(struct aead_request *req) 426static int gcm_aes_nx_encrypt(struct aead_request *req)
426{ 427{
427 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 428 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
428 char *iv = nx_ctx->priv.gcm.iv; 429 char *iv = rctx->iv;
429 430
430 memcpy(iv, req->iv, 12); 431 memcpy(iv, req->iv, 12);
431 432
@@ -434,8 +435,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
434 435
435static int gcm_aes_nx_decrypt(struct aead_request *req) 436static int gcm_aes_nx_decrypt(struct aead_request *req)
436{ 437{
437 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 438 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
438 char *iv = nx_ctx->priv.gcm.iv; 439 char *iv = rctx->iv;
439 440
440 memcpy(iv, req->iv, 12); 441 memcpy(iv, req->iv, 12);
441 442
@@ -445,7 +446,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
445static int gcm4106_aes_nx_encrypt(struct aead_request *req) 446static int gcm4106_aes_nx_encrypt(struct aead_request *req)
446{ 447{
447 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 448 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
448 char *iv = nx_ctx->priv.gcm.iv; 449 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
450 char *iv = rctx->iv;
449 char *nonce = nx_ctx->priv.gcm.nonce; 451 char *nonce = nx_ctx->priv.gcm.nonce;
450 452
451 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); 453 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
@@ -457,7 +459,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
457static int gcm4106_aes_nx_decrypt(struct aead_request *req) 459static int gcm4106_aes_nx_decrypt(struct aead_request *req)
458{ 460{
459 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); 461 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
460 char *iv = nx_ctx->priv.gcm.iv; 462 struct nx_gcm_rctx *rctx = aead_request_ctx(req);
463 char *iv = rctx->iv;
461 char *nonce = nx_ctx->priv.gcm.nonce; 464 char *nonce = nx_ctx->priv.gcm.nonce;
462 465
463 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); 466 memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 8c2faffab4a3..c2f7d4befb55 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
42 unsigned int key_len) 42 unsigned int key_len)
43{ 43{
44 struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); 44 struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
45 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
45 46
46 switch (key_len) { 47 switch (key_len) {
47 case AES_KEYSIZE_128: 48 case AES_KEYSIZE_128:
@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
51 return -EINVAL; 52 return -EINVAL;
52 } 53 }
53 54
54 memcpy(nx_ctx->priv.xcbc.key, in_key, key_len); 55 memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
55 56
56 return 0; 57 return 0;
57} 58}
@@ -148,32 +149,29 @@ out:
148 return rc; 149 return rc;
149} 150}
150 151
151static int nx_xcbc_init(struct shash_desc *desc) 152static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
152{ 153{
153 struct xcbc_state *sctx = shash_desc_ctx(desc); 154 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
154 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
155 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 155 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
156 struct nx_sg *out_sg; 156 int err;
157 int len;
158 157
159 nx_ctx_init(nx_ctx, HCOP_FC_AES); 158 err = nx_crypto_ctx_aes_xcbc_init(tfm);
159 if (err)
160 return err;
160 161
161 memset(sctx, 0, sizeof *sctx); 162 nx_ctx_init(nx_ctx, HCOP_FC_AES);
162 163
163 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); 164 NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
164 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; 165 csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
165 166
166 memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE); 167 return 0;
167 memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key); 168}
168
169 len = AES_BLOCK_SIZE;
170 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
171 &len, nx_ctx->ap->sglen);
172 169
173 if (len != AES_BLOCK_SIZE) 170static int nx_xcbc_init(struct shash_desc *desc)
174 return -EINVAL; 171{
172 struct xcbc_state *sctx = shash_desc_ctx(desc);
175 173
176 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 174 memset(sctx, 0, sizeof *sctx);
177 175
178 return 0; 176 return 0;
179} 177}
@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
186 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 184 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
187 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; 185 struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
188 struct nx_sg *in_sg; 186 struct nx_sg *in_sg;
187 struct nx_sg *out_sg;
189 u32 to_process = 0, leftover, total; 188 u32 to_process = 0, leftover, total;
190 unsigned int max_sg_len; 189 unsigned int max_sg_len;
191 unsigned long irq_flags; 190 unsigned long irq_flags;
@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
213 max_sg_len = min_t(u64, max_sg_len, 212 max_sg_len = min_t(u64, max_sg_len,
214 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 213 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
215 214
215 data_len = AES_BLOCK_SIZE;
216 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
217 &len, nx_ctx->ap->sglen);
218
219 if (data_len != AES_BLOCK_SIZE) {
220 rc = -EINVAL;
221 goto out;
222 }
223
224 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
225
216 do { 226 do {
217 to_process = total - to_process; 227 to_process = total - to_process;
218 to_process = to_process & ~(AES_BLOCK_SIZE - 1); 228 to_process = to_process & ~(AES_BLOCK_SIZE - 1);
@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
235 (u8 *) sctx->buffer, 245 (u8 *) sctx->buffer,
236 &data_len, 246 &data_len,
237 max_sg_len); 247 max_sg_len);
238 if (data_len != sctx->count) 248 if (data_len != sctx->count) {
239 return -EINVAL; 249 rc = -EINVAL;
250 goto out;
251 }
240 } 252 }
241 253
242 data_len = to_process - sctx->count; 254 data_len = to_process - sctx->count;
@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
245 &data_len, 257 &data_len,
246 max_sg_len); 258 max_sg_len);
247 259
248 if (data_len != to_process - sctx->count) 260 if (data_len != to_process - sctx->count) {
249 return -EINVAL; 261 rc = -EINVAL;
262 goto out;
263 }
250 264
251 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * 265 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
252 sizeof(struct nx_sg); 266 sizeof(struct nx_sg);
@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
325 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, 339 in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
326 &len, nx_ctx->ap->sglen); 340 &len, nx_ctx->ap->sglen);
327 341
328 if (len != sctx->count) 342 if (len != sctx->count) {
329 return -EINVAL; 343 rc = -EINVAL;
344 goto out;
345 }
330 346
331 len = AES_BLOCK_SIZE; 347 len = AES_BLOCK_SIZE;
332 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, 348 out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
333 nx_ctx->ap->sglen); 349 nx_ctx->ap->sglen);
334 350
335 if (len != AES_BLOCK_SIZE) 351 if (len != AES_BLOCK_SIZE) {
336 return -EINVAL; 352 rc = -EINVAL;
353 goto out;
354 }
337 355
338 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 356 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
339 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); 357 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
372 .cra_blocksize = AES_BLOCK_SIZE, 390 .cra_blocksize = AES_BLOCK_SIZE,
373 .cra_module = THIS_MODULE, 391 .cra_module = THIS_MODULE,
374 .cra_ctxsize = sizeof(struct nx_crypto_ctx), 392 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
375 .cra_init = nx_crypto_ctx_aes_xcbc_init, 393 .cra_init = nx_crypto_ctx_aes_xcbc_init2,
376 .cra_exit = nx_crypto_ctx_exit, 394 .cra_exit = nx_crypto_ctx_exit,
377 } 395 }
378}; 396};
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 4e91bdb83c59..becb738c897b 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -29,34 +29,28 @@
29#include "nx.h" 29#include "nx.h"
30 30
31 31
32static int nx_sha256_init(struct shash_desc *desc) 32static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
33{ 33{
34 struct sha256_state *sctx = shash_desc_ctx(desc); 34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
35 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 35 int err;
36 struct nx_sg *out_sg;
37 int len;
38 u32 max_sg_len;
39 36
40 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 37 err = nx_crypto_ctx_sha_init(tfm);
38 if (err)
39 return err;
41 40
42 memset(sctx, 0, sizeof *sctx); 41 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
43 42
44 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; 43 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
45 44
46 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); 45 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
47 46
48 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 47 return 0;
49 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 48}
50 max_sg_len = min_t(u64, max_sg_len,
51 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
52 49
53 len = SHA256_DIGEST_SIZE; 50static int nx_sha256_init(struct shash_desc *desc) {
54 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 51 struct sha256_state *sctx = shash_desc_ctx(desc);
55 &len, max_sg_len);
56 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
57 52
58 if (len != SHA256_DIGEST_SIZE) 53 memset(sctx, 0, sizeof *sctx);
59 return -EINVAL;
60 54
61 sctx->state[0] = __cpu_to_be32(SHA256_H0); 55 sctx->state[0] = __cpu_to_be32(SHA256_H0);
62 sctx->state[1] = __cpu_to_be32(SHA256_H1); 56 sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -77,7 +71,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
77 struct sha256_state *sctx = shash_desc_ctx(desc); 71 struct sha256_state *sctx = shash_desc_ctx(desc);
78 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
79 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
80 struct nx_sg *in_sg; 74 struct nx_sg *out_sg;
81 u64 to_process = 0, leftover, total; 75 u64 to_process = 0, leftover, total;
82 unsigned long irq_flags; 76 unsigned long irq_flags;
83 int rc = 0; 77 int rc = 0;
@@ -102,24 +96,28 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
102 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
103 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
104 98
105 in_sg = nx_ctx->in_sg;
106 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
107 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 100 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
108 max_sg_len = min_t(u64, max_sg_len, 101 max_sg_len = min_t(u64, max_sg_len,
109 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 102 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
110 103
104 data_len = SHA256_DIGEST_SIZE;
105 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
106 &data_len, max_sg_len);
107 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
108
109 if (data_len != SHA256_DIGEST_SIZE) {
110 rc = -EINVAL;
111 goto out;
112 }
113
111 do { 114 do {
112 /* 115 int used_sgs = 0;
113 * to_process: the SHA256_BLOCK_SIZE data chunk to process in 116 struct nx_sg *in_sg = nx_ctx->in_sg;
114 * this update. This value is also restricted by the sg list
115 * limits.
116 */
117 to_process = total - to_process;
118 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
119 117
120 if (buf_len) { 118 if (buf_len) {
121 data_len = buf_len; 119 data_len = buf_len;
122 in_sg = nx_build_sg_list(nx_ctx->in_sg, 120 in_sg = nx_build_sg_list(in_sg,
123 (u8 *) sctx->buf, 121 (u8 *) sctx->buf,
124 &data_len, 122 &data_len,
125 max_sg_len); 123 max_sg_len);
@@ -128,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
128 rc = -EINVAL; 126 rc = -EINVAL;
129 goto out; 127 goto out;
130 } 128 }
129 used_sgs = in_sg - nx_ctx->in_sg;
131 } 130 }
132 131
132 /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
133 * processed in this iteration. This value is restricted
134 * by sg list limits and number of sgs we already used
135 * for leftover data. (see above)
136 * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
137 * but because data may not be aligned, we need to account
138 * for that too. */
139 to_process = min_t(u64, total,
140 (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
141 to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
142
133 data_len = to_process - buf_len; 143 data_len = to_process - buf_len;
134 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 144 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
135 &data_len, max_sg_len); 145 &data_len, max_sg_len);
136 146
137 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); 147 nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
138 148
139 to_process = (data_len + buf_len); 149 to_process = data_len + buf_len;
140 leftover = total - to_process; 150 leftover = total - to_process;
141 151
142 /* 152 /*
@@ -282,7 +292,7 @@ struct shash_alg nx_shash_sha256_alg = {
282 .cra_blocksize = SHA256_BLOCK_SIZE, 292 .cra_blocksize = SHA256_BLOCK_SIZE,
283 .cra_module = THIS_MODULE, 293 .cra_module = THIS_MODULE,
284 .cra_ctxsize = sizeof(struct nx_crypto_ctx), 294 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
285 .cra_init = nx_crypto_ctx_sha_init, 295 .cra_init = nx_crypto_ctx_sha256_init,
286 .cra_exit = nx_crypto_ctx_exit, 296 .cra_exit = nx_crypto_ctx_exit,
287 } 297 }
288}; 298};
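The new to_process computation in both SHA update paths is just a clamp: take what is left of the input, cap it by how many scatter-gather entries remain after the leftover buffer consumed some, then round down to a whole number of hash blocks. A small worked example of that arithmetic (plain C; the constants are illustrative stand-ins, not the driver's actual limits):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096u	/* stands in for NX_PAGE_SIZE */
#define BLOCK_SIZE	64u	/* SHA-256 block size */

/* Cap the chunk by the sg entries still free, then block-align it. */
static uint64_t chunk_len(uint64_t total, unsigned max_sg_len, unsigned used_sgs)
{
	uint64_t to_process = (uint64_t)(max_sg_len - 1 - used_sgs) * PAGE_SIZE;

	if (total < to_process)
		to_process = total;

	return to_process & ~(uint64_t)(BLOCK_SIZE - 1);
}

int main(void)
{
	/* 1 MiB of input, 32 sg entries, 1 already used for leftover bytes */
	printf("%llu\n", (unsigned long long)chunk_len(1 << 20, 32, 1));
	/* small tail-end update: everything fits, rounded down to a block */
	printf("%llu\n", (unsigned long long)chunk_len(200, 32, 0));
	return 0;
}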
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index e6a58d2ee628..b6e183d58d73 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -28,34 +28,29 @@
28#include "nx.h" 28#include "nx.h"
29 29
30 30
31static int nx_sha512_init(struct shash_desc *desc) 31static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
32{ 32{
33 struct sha512_state *sctx = shash_desc_ctx(desc); 33 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
34 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 34 int err;
35 struct nx_sg *out_sg;
36 int len;
37 u32 max_sg_len;
38 35
39 nx_ctx_init(nx_ctx, HCOP_FC_SHA); 36 err = nx_crypto_ctx_sha_init(tfm);
37 if (err)
38 return err;
40 39
41 memset(sctx, 0, sizeof *sctx); 40 nx_ctx_init(nx_ctx, HCOP_FC_SHA);
42 41
43 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; 42 nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
44 43
45 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); 44 NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
46 45
47 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 46 return 0;
48 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 47}
49 max_sg_len = min_t(u64, max_sg_len,
50 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
51 48
52 len = SHA512_DIGEST_SIZE; 49static int nx_sha512_init(struct shash_desc *desc)
53 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, 50{
54 &len, max_sg_len); 51 struct sha512_state *sctx = shash_desc_ctx(desc);
55 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
56 52
57 if (len != SHA512_DIGEST_SIZE) 53 memset(sctx, 0, sizeof *sctx);
58 return -EINVAL;
59 54
60 sctx->state[0] = __cpu_to_be64(SHA512_H0); 55 sctx->state[0] = __cpu_to_be64(SHA512_H0);
61 sctx->state[1] = __cpu_to_be64(SHA512_H1); 56 sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -76,7 +71,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
76 struct sha512_state *sctx = shash_desc_ctx(desc); 71 struct sha512_state *sctx = shash_desc_ctx(desc);
77 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); 72 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
78 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; 73 struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
79 struct nx_sg *in_sg; 74 struct nx_sg *out_sg;
80 u64 to_process, leftover = 0, total; 75 u64 to_process, leftover = 0, total;
81 unsigned long irq_flags; 76 unsigned long irq_flags;
82 int rc = 0; 77 int rc = 0;
@@ -101,25 +96,28 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
101 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; 96 NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
102 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; 97 NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
103 98
104 in_sg = nx_ctx->in_sg;
105 max_sg_len = min_t(u64, nx_ctx->ap->sglen, 99 max_sg_len = min_t(u64, nx_ctx->ap->sglen,
106 nx_driver.of.max_sg_len/sizeof(struct nx_sg)); 100 nx_driver.of.max_sg_len/sizeof(struct nx_sg));
107 max_sg_len = min_t(u64, max_sg_len, 101 max_sg_len = min_t(u64, max_sg_len,
108 nx_ctx->ap->databytelen/NX_PAGE_SIZE); 102 nx_ctx->ap->databytelen/NX_PAGE_SIZE);
109 103
104 data_len = SHA512_DIGEST_SIZE;
105 out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
106 &data_len, max_sg_len);
107 nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
108
109 if (data_len != SHA512_DIGEST_SIZE) {
110 rc = -EINVAL;
111 goto out;
112 }
113
110 do { 114 do {
111 /* 115 int used_sgs = 0;
112 * to_process: the SHA512_BLOCK_SIZE data chunk to process in 116 struct nx_sg *in_sg = nx_ctx->in_sg;
113 * this update. This value is also restricted by the sg list
114 * limits.
115 */
116 to_process = total - leftover;
117 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
118 leftover = total - to_process;
119 117
120 if (buf_len) { 118 if (buf_len) {
121 data_len = buf_len; 119 data_len = buf_len;
122 in_sg = nx_build_sg_list(nx_ctx->in_sg, 120 in_sg = nx_build_sg_list(in_sg,
123 (u8 *) sctx->buf, 121 (u8 *) sctx->buf,
124 &data_len, max_sg_len); 122 &data_len, max_sg_len);
125 123
@@ -127,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
127 rc = -EINVAL; 125 rc = -EINVAL;
128 goto out; 126 goto out;
129 } 127 }
128 used_sgs = in_sg - nx_ctx->in_sg;
130 } 129 }
131 130
131 /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
132 * processed in this iteration. This value is restricted
133 * by sg list limits and number of sgs we already used
134 * for leftover data. (see above)
135 * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
136 * but because data may not be aligned, we need to account
137 * for that too. */
138 to_process = min_t(u64, total,
139 (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
140 to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
141
132 data_len = to_process - buf_len; 142 data_len = to_process - buf_len;
133 in_sg = nx_build_sg_list(in_sg, (u8 *) data, 143 in_sg = nx_build_sg_list(in_sg, (u8 *) data,
134 &data_len, max_sg_len); 144 &data_len, max_sg_len);
@@ -140,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
140 goto out; 150 goto out;
141 } 151 }
142 152
143 to_process = (data_len + buf_len); 153 to_process = data_len + buf_len;
144 leftover = total - to_process; 154 leftover = total - to_process;
145 155
146 /* 156 /*
@@ -288,7 +298,7 @@ struct shash_alg nx_shash_sha512_alg = {
288 .cra_blocksize = SHA512_BLOCK_SIZE, 298 .cra_blocksize = SHA512_BLOCK_SIZE,
289 .cra_module = THIS_MODULE, 299 .cra_module = THIS_MODULE,
290 .cra_ctxsize = sizeof(struct nx_crypto_ctx), 300 .cra_ctxsize = sizeof(struct nx_crypto_ctx),
291 .cra_init = nx_crypto_ctx_sha_init, 301 .cra_init = nx_crypto_ctx_sha512_init,
292 .cra_exit = nx_crypto_ctx_exit, 302 .cra_exit = nx_crypto_ctx_exit,
293 } 303 }
294}; 304};
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index f6198f29a4a8..436971343ff7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -713,12 +713,15 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
713/* entry points from the crypto tfm initializers */ 713/* entry points from the crypto tfm initializers */
714int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm) 714int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
715{ 715{
716 crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
717 sizeof(struct nx_ccm_rctx));
716 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, 718 return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
717 NX_MODE_AES_CCM); 719 NX_MODE_AES_CCM);
718} 720}
719 721
720int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) 722int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
721{ 723{
724 crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
722 return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, 725 return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
723 NX_MODE_AES_GCM); 726 NX_MODE_AES_GCM);
724} 727}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index de3ea8738146..cdff03a42ae7 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -2,6 +2,8 @@
2#ifndef __NX_H__ 2#ifndef __NX_H__
3#define __NX_H__ 3#define __NX_H__
4 4
5#include <crypto/ctr.h>
6
5#define NX_NAME "nx-crypto" 7#define NX_NAME "nx-crypto"
6#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver" 8#define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
7#define NX_VERSION "1.0" 9#define NX_VERSION "1.0"
@@ -91,8 +93,11 @@ struct nx_crypto_driver {
91 93
92#define NX_GCM4106_NONCE_LEN (4) 94#define NX_GCM4106_NONCE_LEN (4)
93#define NX_GCM_CTR_OFFSET (12) 95#define NX_GCM_CTR_OFFSET (12)
94struct nx_gcm_priv { 96struct nx_gcm_rctx {
95 u8 iv[16]; 97 u8 iv[16];
98};
99
100struct nx_gcm_priv {
96 u8 iauth_tag[16]; 101 u8 iauth_tag[16];
97 u8 nonce[NX_GCM4106_NONCE_LEN]; 102 u8 nonce[NX_GCM4106_NONCE_LEN];
98}; 103};
@@ -100,8 +105,11 @@ struct nx_gcm_priv {
100#define NX_CCM_AES_KEY_LEN (16) 105#define NX_CCM_AES_KEY_LEN (16)
101#define NX_CCM4309_AES_KEY_LEN (19) 106#define NX_CCM4309_AES_KEY_LEN (19)
102#define NX_CCM4309_NONCE_LEN (3) 107#define NX_CCM4309_NONCE_LEN (3)
103struct nx_ccm_priv { 108struct nx_ccm_rctx {
104 u8 iv[16]; 109 u8 iv[16];
110};
111
112struct nx_ccm_priv {
105 u8 b0[16]; 113 u8 b0[16];
106 u8 iauth_tag[16]; 114 u8 iauth_tag[16];
107 u8 oauth_tag[16]; 115 u8 oauth_tag[16];
@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
113}; 121};
114 122
115struct nx_ctr_priv { 123struct nx_ctr_priv {
116 u8 iv[16]; 124 u8 nonce[CTR_RFC3686_NONCE_SIZE];
117}; 125};
118 126
119struct nx_crypto_ctx { 127struct nx_crypto_ctx {
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 46307098f8ba..0a70e46d5416 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
536 dmaengine_terminate_all(dd->dma_lch_in); 536 dmaengine_terminate_all(dd->dma_lch_in);
537 dmaengine_terminate_all(dd->dma_lch_out); 537 dmaengine_terminate_all(dd->dma_lch_out);
538 538
539 dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
540 dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
541
542 return err; 539 return err;
543} 540}
544 541
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..df427c0e9e7b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \ 73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT) 74 ICP_QAT_HW_CIPHER_DECRYPT)
75 75
76static atomic_t active_dev; 76static DEFINE_MUTEX(algs_lock);
77static unsigned int active_devs;
77 78
78struct qat_alg_buf { 79struct qat_alg_buf {
79 uint32_t len; 80 uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
1280 1281
1281int qat_algs_register(void) 1282int qat_algs_register(void)
1282{ 1283{
1283 if (atomic_add_return(1, &active_dev) == 1) { 1284 int ret = 0;
1285
1286 mutex_lock(&algs_lock);
1287 if (++active_devs == 1) {
1284 int i; 1288 int i;
1285 1289
1286 for (i = 0; i < ARRAY_SIZE(qat_algs); i++) 1290 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
1289 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : 1293 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1290 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 1294 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1291 1295
1292 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1296 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1293 } 1297 }
1294 return 0; 1298 mutex_unlock(&algs_lock);
1299 return ret;
1295} 1300}
1296 1301
1297int qat_algs_unregister(void) 1302int qat_algs_unregister(void)
1298{ 1303{
1299 if (atomic_sub_return(1, &active_dev) == 0) 1304 int ret = 0;
1300 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1305
1301 return 0; 1306 mutex_lock(&algs_lock);
1307 if (--active_devs == 0)
1308 ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1309 mutex_unlock(&algs_lock);
1310 return ret;
1302} 1311}
1303 1312
1304int qat_algs_init(void) 1313int qat_algs_init(void)
1305{ 1314{
1306 atomic_set(&active_dev, 0);
1307 crypto_get_default_rng(); 1315 crypto_get_default_rng();
1308 return 0; 1316 return 0;
1309} 1317}
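The qat change above replaces an atomic counter with a mutex-protected count so that the whole "first device registers, last device unregisters" sequence is serialized, not just the increment itself. A hedged user-space sketch of the same pattern using pthreads (register_algs()/unregister_algs() are placeholders for the real crypto registration calls):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_devs;

/* Placeholders for crypto_register_algs()/crypto_unregister_algs(). */
static int register_algs(void)   { puts("registering algorithms");   return 0; }
static int unregister_algs(void) { puts("unregistering algorithms"); return 0; }

int qat_algs_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (++active_devs == 1)		/* only the first device registers */
		ret = register_algs();
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

int qat_algs_unregister(void)
{
	int ret = 0;

	pthread_mutex_lock(&algs_lock);
	if (--active_devs == 0)		/* only the last device unregisters */
		ret = unregister_algs();
	pthread_mutex_unlock(&algs_lock);
	return ret;
}

int main(void)
{
	qat_algs_register();	/* registers */
	qat_algs_register();	/* count only */
	qat_algs_unregister();	/* count only */
	qat_algs_unregister();	/* unregisters */
	return 0;
}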
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ 48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
50 50
51#define ATC_MAX_DSCR_TRIALS 10
52
51/* 53/*
52 * Initial number of descriptors to allocate for each channel. This could 54 * Initial number of descriptors to allocate for each channel. This could
53 * be increased during dma usage. 55 * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
285 * 287 *
286 * @current_len: the number of bytes left before reading CTRLA 288 * @current_len: the number of bytes left before reading CTRLA
287 * @ctrla: the value of CTRLA 289 * @ctrla: the value of CTRLA
288 * @desc: the descriptor containing the transfer width
289 */ 290 */
290static inline int atc_calc_bytes_left(int current_len, u32 ctrla, 291static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
291 struct at_desc *desc)
292{ 292{
293 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); 293 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
294} 294 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
295 295
296/** 296 /*
297 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according 297 * According to the datasheet, when reading the Control A Register
298 * to the current value of CTRLA. 298 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
299 * 299 * number of transfers completed on the Source Interface.
300 * @current_len: the number of bytes left before reading CTRLA 300 * So btsize is always a number of source width transfers.
301 * @atchan: the channel to read CTRLA for 301 */
302 * @desc: the descriptor containing the transfer width 302 return current_len - (btsize << src_width);
303 */
304static inline int atc_calc_bytes_left_from_reg(int current_len,
305 struct at_dma_chan *atchan, struct at_desc *desc)
306{
307 u32 ctrla = channel_readl(atchan, CTRLA);
308
309 return atc_calc_bytes_left(current_len, ctrla, desc);
310} 303}
311 304
312/** 305/**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
320 struct at_desc *desc_first = atc_first_active(atchan); 313 struct at_desc *desc_first = atc_first_active(atchan);
321 struct at_desc *desc; 314 struct at_desc *desc;
322 int ret; 315 int ret;
323 u32 ctrla, dscr; 316 u32 ctrla, dscr, trials;
324 317
325 /* 318 /*
326 * If the cookie doesn't match to the currently running transfer then 319 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
346 * the channel's DSCR register and compare it against the value 339 * the channel's DSCR register and compare it against the value
347 * of the hardware linked list structure of each child 340 * of the hardware linked list structure of each child
348 * descriptor. 341 * descriptor.
342 *
343 * The CTRLA register provides us with the amount of data
344 * already read from the source for the current child
345 * descriptor. So we can compute a more accurate residue by also
346 * removing the number of bytes corresponding to this amount of
347 * data.
348 *
349 * However, the DSCR and CTRLA registers cannot be read both
350 * atomically. Hence a race condition may occur: the first read
351 * register may refer to one child descriptor whereas the second
352 * read may refer to a later child descriptor in the list
353 * because of the DMA transfer progression in between the two
354 * reads.
355 *
356 * One solution could have been to pause the DMA transfer, read
357 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
358 * this approach presents some drawbacks:
359 * - If the DMA transfer is paused, RX overruns or TX underruns
360 * are more likey to occur depending on the system latency.
361 * Taking the USART driver as an example, it uses a cyclic DMA
362 * transfer to read data from the Receive Holding Register
363 * (RHR) to avoid RX overruns since the RHR is not protected
364 * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
365 * to compute the residue would break the USART driver design.
366 * - The atc_pause() function masks interrupts but we'd rather
367 * avoid to do so for system latency purpose.
368 *
369 * Then we'd rather use another solution: the DSCR is read a
370 * first time, the CTRLA is read in turn, next the DSCR is read
371 * a second time. If the two consecutive read values of the DSCR
372 * are the same then we assume both refer to the very same
373 * child descriptor as well as the CTRLA value read in between
374 * does. For cyclic transfers, the assumption is that a full loop
375 * is "not so fast".
376 * If the two DSCR values are different, we read again the CTRLA
377 * then the DSCR till two consecutive read values from DSCR are
378 * equal or till the maximum number of trials is reached.
379 * This algorithm is very unlikely not to find a stable value for
380 * DSCR.
349 */ 381 */
350 382
351 ctrla = channel_readl(atchan, CTRLA);
352 rmb(); /* ensure CTRLA is read before DSCR */
353 dscr = channel_readl(atchan, DSCR); 383 dscr = channel_readl(atchan, DSCR);
384 rmb(); /* ensure DSCR is read before CTRLA */
385 ctrla = channel_readl(atchan, CTRLA);
386 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
387 u32 new_dscr;
388
389 rmb(); /* ensure DSCR is read after CTRLA */
390 new_dscr = channel_readl(atchan, DSCR);
391
392 /*
393 * If the DSCR register value has not changed inside the
394 * DMA controller since the previous read, we assume
395 * that both the dscr and ctrla values refer to the
396 * very same descriptor.
397 */
398 if (likely(new_dscr == dscr))
399 break;
400
401 /*
402 * DSCR has changed inside the DMA controller, so the
403 * previously read value of CTRLA may refer to an already
404 * processed descriptor hence could be outdated.
405 * We need to update ctrla to match the current
406 * descriptor.
407 */
408 dscr = new_dscr;
409 rmb(); /* ensure DSCR is read before CTRLA */
410 ctrla = channel_readl(atchan, CTRLA);
411 }
412 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
413 return -ETIMEDOUT;
354 414
355 /* for the first descriptor we can be more accurate */ 415 /* for the first descriptor we can be more accurate */
356 if (desc_first->lli.dscr == dscr) 416 if (desc_first->lli.dscr == dscr)
357 return atc_calc_bytes_left(ret, ctrla, desc_first); 417 return atc_calc_bytes_left(ret, ctrla);
358 418
359 ret -= desc_first->len; 419 ret -= desc_first->len;
360 list_for_each_entry(desc, &desc_first->tx_list, desc_node) { 420 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
365 } 425 }
366 426
367 /* 427 /*
368 * For the last descriptor in the chain we can calculate 428 * For the current descriptor in the chain we can calculate
369 * the remaining bytes using the channel's register. 429 * the remaining bytes using the channel's register.
370 * Note that the transfer width of the first and last
371 * descriptor may differ.
372 */ 430 */
373 if (!desc->lli.dscr) 431 ret = atc_calc_bytes_left(ret, ctrla);
374 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
375 } else { 432 } else {
376 /* single transfer */ 433 /* single transfer */
377 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); 434 ctrla = channel_readl(atchan, CTRLA);
435 ret = atc_calc_bytes_left(ret, ctrla);
378 } 436 }
379 437
380 return ret; 438 return ret;
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
726 784
727 desc->txd.cookie = -EBUSY; 785 desc->txd.cookie = -EBUSY;
728 desc->total_len = desc->len = len; 786 desc->total_len = desc->len = len;
729 desc->tx_width = dwidth;
730 787
731 /* set end-of-link to the last link descriptor of list*/ 788 /* set end-of-link to the last link descriptor of list*/
732 set_desc_eol(desc); 789 set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
804 first->txd.cookie = -EBUSY; 861 first->txd.cookie = -EBUSY;
805 first->total_len = len; 862 first->total_len = len;
806 863
807 /* set transfer width for the calculation of the residue */
808 first->tx_width = src_width;
809 prev->tx_width = src_width;
810
811 /* set end-of-link to the last link descriptor of list*/ 864 /* set end-of-link to the last link descriptor of list*/
812 set_desc_eol(desc); 865 set_desc_eol(desc);
813 866
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
956 first->txd.cookie = -EBUSY; 1009 first->txd.cookie = -EBUSY;
957 first->total_len = total_len; 1010 first->total_len = total_len;
958 1011
959 /* set transfer width for the calculation of the residue */
960 first->tx_width = reg_width;
961 prev->tx_width = reg_width;
962
963 /* first link descriptor of list is responsible of flags */ 1012 /* first link descriptor of list is responsible of flags */
964 first->txd.flags = flags; /* client is in control of this ack */ 1013 first->txd.flags = flags; /* client is in control of this ack */
965 1014
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
1077 desc->txd.cookie = 0; 1126 desc->txd.cookie = 0;
1078 desc->len = len; 1127 desc->len = len;
1079 1128
1080 /*
1081 * Although we only need the transfer width for the first and
1082 * the last descriptor, its easier to set it to all descriptors.
1083 */
1084 desc->tx_width = src_width;
1085
1086 atc_desc_chain(&first, &prev, desc); 1129 atc_desc_chain(&first, &prev, desc);
1087 1130
1088 /* update the lengths and addresses for the next loop cycle */ 1131 /* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1256 /* First descriptor of the chain embedds additional information */ 1299 /* First descriptor of the chain embedds additional information */
1257 first->txd.cookie = -EBUSY; 1300 first->txd.cookie = -EBUSY;
1258 first->total_len = buf_len; 1301 first->total_len = buf_len;
1259 first->tx_width = reg_width;
1260 1302
1261 return &first->txd; 1303 return &first->txd;
1262 1304
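The long comment added to atc_get_bytes_left() describes a generic lock-free trick: when two registers cannot be sampled atomically, read the anchor register, read the dependent one, then re-read the anchor and retry until two consecutive anchor reads match or a trial limit is hit. A stand-alone sketch of that retry loop (callbacks stand in for the channel register reads; this is not the driver's actual interface):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_TRIALS 10

/* Read two related "registers" consistently: dscr anchors the snapshot,
 * ctrla is only trusted if dscr did not move across the pair of reads. */
static int read_consistent_pair(uint32_t (*read_dscr)(void),
				uint32_t (*read_ctrla)(void),
				uint32_t *dscr, uint32_t *ctrla)
{
	uint32_t cur = read_dscr();
	uint32_t val = read_ctrla();
	int trials;

	for (trials = 0; trials < MAX_TRIALS; trials++) {
		uint32_t again = read_dscr();

		if (again == cur) {	/* stable: ctrla matches this descriptor */
			*dscr = cur;
			*ctrla = val;
			return 0;
		}
		/* dscr moved: the ctrla we read may belong to an older
		 * descriptor, so refresh both and try again. */
		cur = again;
		val = read_ctrla();
	}
	return -ETIMEDOUT;
}

/* Toy "registers" for demonstration only. */
static uint32_t fake_dscr(void)  { return 0x1000; }
static uint32_t fake_ctrla(void) { return 0x40; }

int main(void)
{
	uint32_t dscr, ctrla;

	if (!read_consistent_pair(fake_dscr, fake_ctrla, &dscr, &ctrla))
		printf("dscr=0x%x ctrla=0x%x\n", dscr, ctrla);
	return 0;
}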
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..7f5a08230f76 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
112#define ATC_SRC_WIDTH_BYTE (0x0 << 24) 112#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) 113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
114#define ATC_SRC_WIDTH_WORD (0x2 << 24) 114#define ATC_SRC_WIDTH_WORD (0x2 << 24)
115#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
115#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ 116#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
116#define ATC_DST_WIDTH(x) ((x) << 28) 117#define ATC_DST_WIDTH(x) ((x) << 28)
117#define ATC_DST_WIDTH_BYTE (0x0 << 28) 118#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
182 * @txd: support for the async_tx api 183 * @txd: support for the async_tx api
183 * @desc_node: node on the channed descriptors list 184 * @desc_node: node on the channed descriptors list
184 * @len: descriptor byte count 185 * @len: descriptor byte count
185 * @tx_width: transfer width
186 * @total_len: total transaction byte count 186 * @total_len: total transaction byte count
187 */ 187 */
188struct at_desc { 188struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
194 struct dma_async_tx_descriptor txd; 194 struct dma_async_tx_descriptor txd;
195 struct list_head desc_node; 195 struct list_head desc_node;
196 size_t len; 196 size_t len;
197 u32 tx_width;
198 size_t total_len; 197 size_t total_len;
199 198
200 /* Interleaved data */ 199 /* Interleaved data */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..40afa2a16cfc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
359 * descriptor view 2 since some fields of the configuration register 359 * descriptor view 2 since some fields of the configuration register
360 * depend on transfer size and src/dest addresses. 360 * depend on transfer size and src/dest addresses.
361 */ 361 */
362 if (at_xdmac_chan_is_cyclic(atchan)) { 362 if (at_xdmac_chan_is_cyclic(atchan))
363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1; 363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
364 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); 364 else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
365 } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
366 reg = AT_XDMAC_CNDC_NDVIEW_NDV3; 365 reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
367 } else { 366 else
368 /*
369 * No need to write AT_XDMAC_CC reg, it will be done when the
370 * descriptor is fecthed.
371 */
372 reg = AT_XDMAC_CNDC_NDVIEW_NDV2; 367 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
373 } 368 /*
369 * Even if the register will be updated from the configuration in the
370 * descriptor when using view 2 or higher, the PROT bit won't be set
371 * properly. This bit can be modified only by using the channel
372 * configuration register.
373 */
374 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
374 375
375 reg |= AT_XDMAC_CNDC_NDDUP 376 reg |= AT_XDMAC_CNDC_NDDUP
376 | AT_XDMAC_CNDC_NDSUP 377 | AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
681 desc->lld.mbr_sa = mem; 682 desc->lld.mbr_sa = mem;
682 desc->lld.mbr_da = atchan->sconfig.dst_addr; 683 desc->lld.mbr_da = atchan->sconfig.dst_addr;
683 } 684 }
684 desc->lld.mbr_cfg = atchan->cfg; 685 dwidth = at_xdmac_get_dwidth(atchan->cfg);
685 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) 686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
687 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) 687 ? dwidth
688 : AT_XDMAC_CC_DWIDTH_BYTE; 688 : AT_XDMAC_CC_DWIDTH_BYTE;
689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ 689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ 690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ 691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
692 | (len >> fixed_dwidth); /* microblock length */ 692 | (len >> fixed_dwidth); /* microblock length */
693 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
694 AT_XDMAC_CC_DWIDTH(fixed_dwidth);
693 dev_dbg(chan2dev(chan), 695 dev_dbg(chan2dev(chan),
694 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 696 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
695 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); 697 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce15f25d..3ff284c8e3d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
689 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); 689 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
690 if (IS_ERR(ch)) 690 if (IS_ERR(ch))
691 return NULL; 691 return NULL;
692
693 dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
694 ch->device->privatecnt++;
695
692 return ch; 696 return ch;
693} 697}
694EXPORT_SYMBOL_GPL(dma_request_slave_channel); 698EXPORT_SYMBOL_GPL(dma_request_slave_channel);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1022c2e1a2b0..cf1c87fa1edd 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable);
1746MODULE_LICENSE("GPL v2"); 1746MODULE_LICENSE("GPL v2");
1747MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); 1747MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1748MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1748MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1749MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 1749MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..f1325f62563e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
162 config &= ~0x7; 162 config &= ~0x7;
163 config |= op_mode; 163 config |= op_mode;
164 164
165 if (IS_ENABLED(__BIG_ENDIAN)) 165#if defined(__BIG_ENDIAN)
166 config |= XOR_DESCRIPTOR_SWAP; 166 config |= XOR_DESCRIPTOR_SWAP;
167 else 167#else
168 config &= ~XOR_DESCRIPTOR_SWAP; 168 config &= ~XOR_DESCRIPTOR_SWAP;
169#endif
169 170
170 writel_relaxed(config, XOR_CONFIG(chan)); 171 writel_relaxed(config, XOR_CONFIG(chan));
171 chan->current_type = type; 172 chan->current_type = type;
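
Note on the mv_xor.c hunk: IS_ENABLED() only reports true for macros defined to 1 (or to 'm'), i.e. CONFIG_* Kconfig symbols. __BIG_ENDIAN comes from the byteorder headers and is defined to 4321 on big-endian builds, so IS_ENABLED(__BIG_ENDIAN) is always false and the swap bit was never set; hence the revert to a plain preprocessor check. Illustrative sketch, not the driver code (XOR_DESCRIPTOR_SWAP is the driver's own define):

#include <linux/types.h>
#include <asm/byteorder.h>

static inline u32 demo_apply_desc_swap(u32 config)
{
#ifdef __BIG_ENDIAN
	config |= XOR_DESCRIPTOR_SWAP;	/* hardware descriptors are little-endian */
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif
	return config;
}
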
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..ecab4ea059b4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2328 desc->txd.callback = last->txd.callback; 2328 desc->txd.callback = last->txd.callback;
2329 desc->txd.callback_param = last->txd.callback_param; 2329 desc->txd.callback_param = last->txd.callback_param;
2330 } 2330 }
2331 last->last = false; 2331 desc->last = false;
2332 2332
2333 dma_cookie_assign(&desc->txd); 2333 dma_cookie_assign(&desc->txd);
2334 2334
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2623 desc->rqcfg.brst_len = 1; 2623 desc->rqcfg.brst_len = 1;
2624 2624
2625 desc->rqcfg.brst_len = get_burst_len(desc, len); 2625 desc->rqcfg.brst_len = get_burst_len(desc, len);
2626 desc->bytes_requested = len;
2626 2627
2627 desc->txd.flags = flags; 2628 desc->txd.flags = flags;
2628 2629
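
Note on the pl330.c hunks: the submit path now clears the `last` flag on the descriptor being walked in the loop (desc) rather than repeatedly on the submitted one (last), and the memcpy prep records the total length in bytes_requested so residue reporting has a baseline. From a consumer's point of view that residue surfaces through the standard status query; a hedged sketch, where chan and cookie are assumed to come from an earlier prep/submit:

#include <linux/kernel.h>
#include <linux/dmaengine.h>

static void demo_report_residue(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_IN_PROGRESS)
		pr_info("transfer still has %u bytes pending\n", state.residue);
}
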
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
29 spin_lock_irqsave(&vc->lock, flags); 29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx); 30 cookie = dma_cookie_assign(tx);
31 31
32 list_move_tail(&vd->node, &vc->desc_submitted); 32 list_add_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags); 33 spin_unlock_irqrestore(&vc->lock, flags);
34 34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", 35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
83 cb_data = vd->tx.callback_param; 83 cb_data = vd->tx.callback_param;
84 84
85 list_del(&vd->node); 85 list_del(&vd->node);
86 if (async_tx_test_ack(&vd->tx)) 86
87 list_add(&vd->node, &vc->desc_allocated); 87 vc->desc_free(vd);
88 else
89 vc->desc_free(vd);
90 88
91 if (cb) 89 if (cb)
92 cb(cb_data); 90 cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
98 while (!list_empty(head)) { 96 while (!list_empty(head)) {
99 struct virt_dma_desc *vd = list_first_entry(head, 97 struct virt_dma_desc *vd = list_first_entry(head,
100 struct virt_dma_desc, node); 98 struct virt_dma_desc, node);
101 if (async_tx_test_ack(&vd->tx)) { 99 list_del(&vd->node);
102 list_move_tail(&vd->node, &vc->desc_allocated); 100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
103 } else { 101 vc->desc_free(vd);
104 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
105 list_del(&vd->node);
106 vc->desc_free(vd);
107 }
108 } 102 }
109} 103}
110EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); 104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
114 dma_cookie_init(&vc->chan); 108 dma_cookie_init(&vc->chan);
115 109
116 spin_lock_init(&vc->lock); 110 spin_lock_init(&vc->lock);
117 INIT_LIST_HEAD(&vc->desc_allocated);
118 INIT_LIST_HEAD(&vc->desc_submitted); 111 INIT_LIST_HEAD(&vc->desc_submitted);
119 INIT_LIST_HEAD(&vc->desc_issued); 112 INIT_LIST_HEAD(&vc->desc_issued);
120 INIT_LIST_HEAD(&vc->desc_completed); 113 INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
29 spinlock_t lock; 29 spinlock_t lock;
30 30
31 /* protected by vc.lock */ 31 /* protected by vc.lock */
32 struct list_head desc_allocated;
33 struct list_head desc_submitted; 32 struct list_head desc_submitted;
34 struct list_head desc_issued; 33 struct list_head desc_issued;
35 struct list_head desc_completed; 34 struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
56 struct virt_dma_desc *vd, unsigned long tx_flags) 55 struct virt_dma_desc *vd, unsigned long tx_flags)
57{ 56{
58 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); 57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
59 unsigned long flags;
60 58
61 dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
62 vd->tx.flags = tx_flags; 60 vd->tx.flags = tx_flags;
63 vd->tx.tx_submit = vchan_tx_submit; 61 vd->tx.tx_submit = vchan_tx_submit;
64 62
65 spin_lock_irqsave(&vc->lock, flags);
66 list_add_tail(&vd->node, &vc->desc_allocated);
67 spin_unlock_irqrestore(&vc->lock, flags);
68
69 return &vd->tx; 63 return &vd->tx;
70} 64}
71 65
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
128} 122}
129 123
130/** 124/**
131 * vchan_get_all_descriptors - obtain all allocated, submitted and issued 125 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
132 * descriptors
133 * vc: virtual channel to get descriptors from 126 * vc: virtual channel to get descriptors from
134 * head: list of descriptors found 127 * head: list of descriptors found
135 * 128 *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
141static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, 134static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
142 struct list_head *head) 135 struct list_head *head)
143{ 136{
144 list_splice_tail_init(&vc->desc_allocated, head);
145 list_splice_tail_init(&vc->desc_submitted, head); 137 list_splice_tail_init(&vc->desc_submitted, head);
146 list_splice_tail_init(&vc->desc_issued, head); 138 list_splice_tail_init(&vc->desc_issued, head);
147 list_splice_tail_init(&vc->desc_completed, head); 139 list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
149 141
150static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) 142static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
151{ 143{
152 struct virt_dma_desc *vd;
153 unsigned long flags; 144 unsigned long flags;
154 LIST_HEAD(head); 145 LIST_HEAD(head);
155 146
156 spin_lock_irqsave(&vc->lock, flags); 147 spin_lock_irqsave(&vc->lock, flags);
157 vchan_get_all_descriptors(vc, &head); 148 vchan_get_all_descriptors(vc, &head);
158 list_for_each_entry(vd, &head, node)
159 async_tx_clear_ack(&vd->tx);
160 spin_unlock_irqrestore(&vc->lock, flags); 149 spin_unlock_irqrestore(&vc->lock, flags);
161 150
162 vchan_dma_desc_free_list(vc, &head); 151 vchan_dma_desc_free_list(vc, &head);
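
Note on the virt-dma changes: this is a revert of the short-lived desc_allocated list. Descriptors are once again put on a channel list only at tx_submit() time, are freed directly on completion, and vchan_get_all_descriptors() collects just the submitted, issued and completed lists. A sketch of how a driver's prep callback typically uses vchan_tx_prep() after the revert (the foo_* names are made up):

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include "virt-dma.h"

struct foo_desc {
	struct virt_dma_desc vd;
	/* hardware descriptor fields would live here */
};

struct foo_chan {
	struct virt_dma_chan vc;
};

static struct dma_async_tx_descriptor *
foo_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, vc.chan);
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;

	/* ... program the hardware descriptor for dst/src/len here ... */

	/* Not placed on any channel list yet; that happens in tx_submit(). */
	return vchan_tx_prep(&fc->vc, &d->vd, flags);
}
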
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..dff22ab01851 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
112#define XGENE_DMA_BLK_MEM_RDY 0xD074 112#define XGENE_DMA_BLK_MEM_RDY 0xD074
113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF 113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
114#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
114 115
115/* X-Gene SoC EFUSE csr register and bit defination */ 116/* X-Gene SoC EFUSE csr register and bit defination */
116#define XGENE_SOC_JTAG1_SHADOW 0x18 117#define XGENE_SOC_JTAG1_SHADOW 0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
1887 return -ENOMEM; 1888 return -ENOMEM;
1888 } 1889 }
1889 1890
1891 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1892
1890 /* Get efuse csr region */ 1893 /* Get efuse csr region */
1891 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 1894 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1892 if (!res) { 1895 if (!res) {
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
920 */ 920 */
921 921
922 for (row = 0; row < mci->nr_csrows; row++) { 922 for (row = 0; row < mci->nr_csrows; row++) {
923 struct csrow_info *csi = &mci->csrows[row]; 923 struct csrow_info *csi = mci->csrows[row];
924 924
925 /* 925 /*
926 * Get the configuration settings for this 926 * Get the configuration settings for this
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..eebdf2a33bfe 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); 200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
201 if (status) { 201 if (status) {
202 dev_err(&pdev->dev, "failed to register extcon device\n"); 202 dev_err(&pdev->dev, "failed to register extcon device\n");
203 kfree(palmas_usb->edev->name);
204 return status; 203 return status;
205 } 204 }
206 205
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
214 if (status < 0) { 213 if (status < 0) {
215 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", 214 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
216 palmas_usb->id_irq, status); 215 palmas_usb->id_irq, status);
217 kfree(palmas_usb->edev->name);
218 return status; 216 return status;
219 } 217 }
220 } 218 }
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
229 if (status < 0) { 227 if (status < 0) {
230 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", 228 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
231 palmas_usb->vbus_irq, status); 229 palmas_usb->vbus_irq, status);
232 kfree(palmas_usb->edev->name);
233 return status; 230 return status;
234 } 231 }
235 } 232 }
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
239 return 0; 236 return 0;
240} 237}
241 238
242static int palmas_usb_remove(struct platform_device *pdev)
243{
244 struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
245
246 kfree(palmas_usb->edev->name);
247
248 return 0;
249}
250
251#ifdef CONFIG_PM_SLEEP 239#ifdef CONFIG_PM_SLEEP
252static int palmas_usb_suspend(struct device *dev) 240static int palmas_usb_suspend(struct device *dev)
253{ 241{
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
288 276
289static struct platform_driver palmas_usb_driver = { 277static struct platform_driver palmas_usb_driver = {
290 .probe = palmas_usb_probe, 278 .probe = palmas_usb_probe,
291 .remove = palmas_usb_remove,
292 .driver = { 279 .driver = {
293 .name = "palmas-usb", 280 .name = "palmas-usb",
294 .of_match_table = of_palmas_match_tbl, 281 .of_match_table = of_palmas_match_tbl,
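
Note on the extcon-palmas.c hunks: the driver stops freeing edev->name by hand and drops the remove() callback that existed only for that kfree(). With the devm-managed extcon device registered above, the core owns the device (and its name string) for the lifetime of the platform device, so no driver-side cleanup is needed. A minimal sketch of that devm pattern (demo_* names are illustrative):

#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static const unsigned int demo_cables[] = {
	EXTCON_USB,
	EXTCON_USB_HOST,
	EXTCON_NONE,
};

static int demo_extcon_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = devm_extcon_dev_allocate(&pdev->dev, demo_cables);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* Unregistered and freed automatically when the device goes away. */
	return devm_extcon_dev_register(&pdev->dev, edev);
}
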
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
124 return -EINVAL; 124 return -EINVAL;
125} 125}
126 126
127static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) 127static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
128{ 128{
129 unsigned int id = EXTCON_NONE; 129 unsigned int id = -EINVAL;
130 int i = 0; 130 int i = 0;
131 131
132 if (edev->max_supported == 0) 132 /* Find the id of extcon cable */
133 return -EINVAL;
134
135 /* Find the the number of extcon cable */
136 while (extcon_name[i]) { 133 while (extcon_name[i]) {
137 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { 134 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
138 id = i; 135 id = i;
139 break; 136 break;
140 } 137 }
138 i++;
141 } 139 }
142 140
143 if (id == EXTCON_NONE) 141 return id;
142}
143
144static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
145{
146 unsigned int id;
147
148 if (edev->max_supported == 0)
144 return -EINVAL; 149 return -EINVAL;
145 150
151 /* Find the the number of extcon cable */
152 id = find_cable_id_by_name(edev, name);
153 if (id < 0)
154 return id;
155
146 return find_cable_index_by_id(edev, id); 156 return find_cable_index_by_id(edev, id);
147} 157}
148 158
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
228 struct extcon_cable *cable = container_of(attr, struct extcon_cable, 238 struct extcon_cable *cable = container_of(attr, struct extcon_cable,
229 attr_state); 239 attr_state);
230 240
241 int i = cable->cable_index;
242
231 return sprintf(buf, "%d\n", 243 return sprintf(buf, "%d\n",
232 extcon_get_cable_state_(cable->edev, 244 extcon_get_cable_state_(cable->edev,
233 cable->cable_index)); 245 cable->edev->supported_cable[i]));
234} 246}
235 247
236/** 248/**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
263 spin_lock_irqsave(&edev->lock, flags); 275 spin_lock_irqsave(&edev->lock, flags);
264 276
265 if (edev->state != ((edev->state & ~mask) | (state & mask))) { 277 if (edev->state != ((edev->state & ~mask) | (state & mask))) {
278 u32 old_state;
279
266 if (check_mutually_exclusive(edev, (edev->state & ~mask) | 280 if (check_mutually_exclusive(edev, (edev->state & ~mask) |
267 (state & mask))) { 281 (state & mask))) {
268 spin_unlock_irqrestore(&edev->lock, flags); 282 spin_unlock_irqrestore(&edev->lock, flags);
269 return -EPERM; 283 return -EPERM;
270 } 284 }
271 285
272 for (index = 0; index < edev->max_supported; index++) { 286 old_state = edev->state;
273 if (is_extcon_changed(edev->state, state, index, &attached))
274 raw_notifier_call_chain(&edev->nh[index], attached, edev);
275 }
276
277 edev->state &= ~mask; 287 edev->state &= ~mask;
278 edev->state |= state & mask; 288 edev->state |= state & mask;
279 289
290 for (index = 0; index < edev->max_supported; index++) {
291 if (is_extcon_changed(old_state, edev->state, index,
292 &attached))
293 raw_notifier_call_chain(&edev->nh[index],
294 attached, edev);
295 }
296
280 /* This could be in interrupt handler */ 297 /* This could be in interrupt handler */
281 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); 298 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
282 if (prop_buf) { 299 if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
361 */ 378 */
362int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) 379int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
363{ 380{
364 return extcon_get_cable_state_(edev, find_cable_index_by_name 381 unsigned int id;
365 (edev, cable_name)); 382
383 id = find_cable_id_by_name(edev, cable_name);
384 if (id < 0)
385 return id;
386
387 return extcon_get_cable_state_(edev, id);
366} 388}
367EXPORT_SYMBOL_GPL(extcon_get_cable_state); 389EXPORT_SYMBOL_GPL(extcon_get_cable_state);
368 390
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
404int extcon_set_cable_state(struct extcon_dev *edev, 426int extcon_set_cable_state(struct extcon_dev *edev,
405 const char *cable_name, bool cable_state) 427 const char *cable_name, bool cable_state)
406{ 428{
407 return extcon_set_cable_state_(edev, find_cable_index_by_name 429 unsigned int id;
408 (edev, cable_name), cable_state); 430
431 id = find_cable_id_by_name(edev, cable_name);
432 if (id < 0)
433 return id;
434
435 return extcon_set_cable_state_(edev, id, cable_state);
409} 436}
410EXPORT_SYMBOL_GPL(extcon_set_cable_state); 437EXPORT_SYMBOL_GPL(extcon_set_cable_state);
411 438
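
Note on the extcon.c hunks: name-based lookups now go through find_cable_id_by_name(), which maps a cable name ("USB", "USB-HOST", ...) to its extcon id before the id-based getters and setters run; cable_state_show() likewise translates the per-device index back to the cable id, and state-change notifiers are now called only after edev->state has been updated, comparing against a saved old_state. Consumer-side usage is unchanged; a short sketch, assuming edev is a registered extcon device:

#include <linux/extcon.h>

static bool demo_usb_attached(struct extcon_dev *edev)
{
	int state = extcon_get_cable_state(edev, "USB");

	if (state < 0)		/* unknown cable name or lookup failure */
		return false;

	return state != 0;
}
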
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
245} 245}
246EXPORT_SYMBOL(bcm47xx_nvram_get_contents); 246EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
247 247
248MODULE_LICENSE("GPLv2"); 248MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
305 return ret; 305 return ret;
306} 306}
307 307
308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
309 int len)
309{ 310{
310 struct cper_mem_err_compact cmem; 311 struct cper_mem_err_compact cmem;
311 312
313 /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
314 if (len == sizeof(struct cper_sec_mem_err_old) &&
315 (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
316 pr_err(FW_WARN "valid bits set for fields beyond structure\n");
317 return;
318 }
312 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 319 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
313 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 320 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
314 if (mem->validation_bits & CPER_MEM_VALID_PA) 321 if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
405 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
406 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 413 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
407 printk("%s""section_type: memory error\n", newpfx); 414 printk("%s""section_type: memory error\n", newpfx);
408 if (gdata->error_data_length >= sizeof(*mem_err)) 415 if (gdata->error_data_length >=
409 cper_print_mem(newpfx, mem_err); 416 sizeof(struct cper_sec_mem_err_old))
417 cper_print_mem(newpfx, mem_err,
418 gdata->error_data_length);
410 else 419 else
411 goto err_section_too_small; 420 goto err_section_too_small;
412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { 421 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
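
Note on the cper.c hunk: memory-error sections laid out per older UEFI revisions (struct cper_sec_mem_err_old, shorter than the current structure) are now accepted, but they are refused if their validation bits claim fields that only exist in the newer layout. The guard added above, sketched in isolation (the wrapper name is hypothetical; the struct and flag names come from include/linux/cper.h as extended by this series):

#include <linux/cper.h>

static bool demo_mem_err_record_sane(const struct cper_sec_mem_err *mem, int len)
{
	/* Old-format record claiming fields beyond rank number: don't trust it. */
	if (len == sizeof(struct cper_sec_mem_err_old) &&
	    (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1)))
		return false;

	return true;
}
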
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
58 58
59static int __init parse_efi_cmdline(char *str) 59static int __init parse_efi_cmdline(char *str)
60{ 60{
61 if (!str) {
62 pr_warn("need at least one option\n");
63 return -EINVAL;
64 }
65
61 if (parse_option_str(str, "noruntime")) 66 if (parse_option_str(str, "noruntime"))
62 disable_runtime = true; 67 disable_runtime = true;
63 68
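
Note on the efi.c hunk: early_param() callbacks are invoked with a NULL argument when the option is given bare ("efi" instead of "efi=..."), so parse_efi_cmdline() must check the string before handing it to parse_option_str(). The generic pattern, sketched with a made-up parameter name:

#include <linux/init.h>
#include <linux/kernel.h>

static int __init demo_parse_cmdline(char *str)
{
	if (!str)		/* "demo" given without "=..." */
		return -EINVAL;

	if (parse_option_str(str, "debug"))
		pr_info("demo: debug enabled\n");

	return 0;
}
early_param("demo", demo_parse_cmdline);
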
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 7a3cb1fa0a76..4630a8133ea6 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -87,6 +87,15 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
87 struct brcmstb_gpio_bank *bank; 87 struct brcmstb_gpio_bank *bank;
88 int ret = 0; 88 int ret = 0;
89 89
90 if (!priv) {
91 dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
92 return -EFAULT;
93 }
94
95 /*
96 * You can lose return values below, but we report all errors, and it's
97 * more important to actually perform all of the steps.
98 */
90 list_for_each(pos, &priv->bank_list) { 99 list_for_each(pos, &priv->bank_list) {
91 bank = list_entry(pos, struct brcmstb_gpio_bank, node); 100 bank = list_entry(pos, struct brcmstb_gpio_bank, node);
92 ret = bgpio_remove(&bank->bgc); 101 ret = bgpio_remove(&bank->bgc);
@@ -143,6 +152,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
143 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 152 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
144 if (!priv) 153 if (!priv)
145 return -ENOMEM; 154 return -ENOMEM;
155 platform_set_drvdata(pdev, priv);
156 INIT_LIST_HEAD(&priv->bank_list);
146 157
147 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 158 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
148 reg_base = devm_ioremap_resource(dev, res); 159 reg_base = devm_ioremap_resource(dev, res);
@@ -153,7 +164,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
153 priv->reg_base = reg_base; 164 priv->reg_base = reg_base;
154 priv->pdev = pdev; 165 priv->pdev = pdev;
155 166
156 INIT_LIST_HEAD(&priv->bank_list);
157 if (brcmstb_gpio_sanity_check_banks(dev, np, res)) 167 if (brcmstb_gpio_sanity_check_banks(dev, np, res))
158 return -EINVAL; 168 return -EINVAL;
159 169
@@ -221,8 +231,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
221 dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n", 231 dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
222 priv->num_banks, priv->gpio_base, gpio_base - 1); 232 priv->num_banks, priv->gpio_base, gpio_base - 1);
223 233
224 platform_set_drvdata(pdev, priv);
225
226 return 0; 234 return 0;
227 235
228fail: 236fail:
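
Note on the gpio-brcmstb.c hunks: platform_set_drvdata() and the bank-list initialisation are moved to the top of probe, and remove() now bails out if drvdata was never set, so a remove call following an early probe failure cannot walk an uninitialised list or dereference NULL. The general shape of that pattern, sketched with made-up demo_* names:

#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {
	struct list_head bank_list;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Publish state before anything below can fail... */
	platform_set_drvdata(pdev, priv);
	INIT_LIST_HEAD(&priv->bank_list);

	/* ...so a later remove() always finds consistent drvdata. */
	return 0;
}
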
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c5e05c82d67c..c246ac3dda7c 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -578,15 +578,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
578 writel_relaxed(~0, &g->clr_falling); 578 writel_relaxed(~0, &g->clr_falling);
579 writel_relaxed(~0, &g->clr_rising); 579 writel_relaxed(~0, &g->clr_rising);
580 580
581 /* set up all irqs in this bank */
582 irq_set_chained_handler(bank_irq, gpio_irq_handler);
583
584 /* 581 /*
585 * Each chip handles 32 gpios, and each irq bank consists of 16 582 * Each chip handles 32 gpios, and each irq bank consists of 16
586 * gpio irqs. Pass the irq bank's corresponding controller to 583 * gpio irqs. Pass the irq bank's corresponding controller to
587 * the chained irq handler. 584 * the chained irq handler.
588 */ 585 */
589 irq_set_handler_data(bank_irq, &chips[gpio / 32]); 586 irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
587 &chips[gpio / 32]);
590 588
591 binten |= BIT(bank); 589 binten |= BIT(bank);
592 } 590 }
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index aed4ca9338bc..7d3c90e9da71 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
603 gc->base = gpio_start; 603 gc->base = gpio_start;
604 gc->ngpio = port; 604 gc->ngpio = port;
605 gc->label = chip->client->name; 605 gc->label = chip->client->name;
606 gc->dev = &chip->client->dev;
606 gc->owner = THIS_MODULE; 607 gc->owner = THIS_MODULE;
607 608
608 return port; 609 return port;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b0c57d505be7..61a731ff9a07 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
500 500
501 spin_lock_irqsave(&bank->lock, flags); 501 spin_lock_irqsave(&bank->lock, flags);
502 retval = omap_set_gpio_triggering(bank, offset, type); 502 retval = omap_set_gpio_triggering(bank, offset, type);
503 if (retval) 503 if (retval) {
504 spin_unlock_irqrestore(&bank->lock, flags);
504 goto error; 505 goto error;
506 }
505 omap_gpio_init_irq(bank, offset); 507 omap_gpio_init_irq(bank, offset);
506 if (!omap_gpio_is_input(bank, offset)) { 508 if (!omap_gpio_is_input(bank, offset)) {
507 spin_unlock_irqrestore(&bank->lock, flags); 509 spin_unlock_irqrestore(&bank->lock, flags);
@@ -1185,6 +1187,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
1185 bank->irq = res->start; 1187 bank->irq = res->start;
1186 bank->dev = dev; 1188 bank->dev = dev;
1187 bank->chip.dev = dev; 1189 bank->chip.dev = dev;
1190 bank->chip.owner = THIS_MODULE;
1188 bank->dbck_flag = pdata->dbck_flag; 1191 bank->dbck_flag = pdata->dbck_flag;
1189 bank->stride = pdata->bank_stride; 1192 bank->stride = pdata->bank_stride;
1190 bank->width = pdata->bank_width; 1193 bank->width = pdata->bank_width;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index d233eb3b8132..50caeb1ee350 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
570 "could not connect irqchip to gpiochip\n"); 570 "could not connect irqchip to gpiochip\n");
571 return ret; 571 return ret;
572 } 572 }
573
574 gpiochip_set_chained_irqchip(&chip->gpio_chip,
575 &pca953x_irq_chip,
576 client->irq, NULL);
573 } 577 }
574 578
575 return 0; 579 return 0;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 77fe5d3cb105..d5284dfe01fe 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
220 if (!chip->gpio_width[1]) 220 if (!chip->gpio_width[1])
221 return; 221 return;
222 222
223 xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET, 223 xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
224 chip->gpio_state[1]); 224 chip->gpio_state[1]);
225 xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET, 225 xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
226 chip->gpio_dir[1]); 226 chip->gpio_dir[1]);
227} 227}
228 228
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 2e87c4b8da26..a78882389836 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
757 gpiochip_remove(&gpio->chip); 757 gpiochip_remove(&gpio->chip);
758 clk_disable_unprepare(gpio->clk); 758 clk_disable_unprepare(gpio->clk);
759 device_set_wakeup_capable(&pdev->dev, 0); 759 device_set_wakeup_capable(&pdev->dev, 0);
760 pm_runtime_disable(&pdev->dev);
760 return 0; 761 return 0;
761} 762}
762 763
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..f7b49d5ce4b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
1130 uint32_t me_feature_version; 1130 uint32_t me_feature_version;
1131 uint32_t ce_feature_version; 1131 uint32_t ce_feature_version;
1132 uint32_t pfp_feature_version; 1132 uint32_t pfp_feature_version;
1133 uint32_t rlc_feature_version;
1134 uint32_t mec_feature_version;
1135 uint32_t mec2_feature_version;
1133 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 1136 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
1134 unsigned num_gfx_rings; 1137 unsigned num_gfx_rings;
1135 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 1138 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1614,6 +1617,9 @@ struct amdgpu_uvd {
1614#define AMDGPU_MAX_VCE_HANDLES 16 1617#define AMDGPU_MAX_VCE_HANDLES 16
1615#define AMDGPU_VCE_FIRMWARE_OFFSET 256 1618#define AMDGPU_VCE_FIRMWARE_OFFSET 256
1616 1619
1620#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1621#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1622
1617struct amdgpu_vce { 1623struct amdgpu_vce {
1618 struct amdgpu_bo *vcpu_bo; 1624 struct amdgpu_bo *vcpu_bo;
1619 uint64_t gpu_addr; 1625 uint64_t gpu_addr;
@@ -1626,6 +1632,7 @@ struct amdgpu_vce {
1626 const struct firmware *fw; /* VCE firmware */ 1632 const struct firmware *fw; /* VCE firmware */
1627 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1633 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1628 struct amdgpu_irq_src irq; 1634 struct amdgpu_irq_src irq;
1635 unsigned harvest_config;
1629}; 1636};
1630 1637
1631/* 1638/*
@@ -1635,6 +1642,7 @@ struct amdgpu_sdma {
1635 /* SDMA firmware */ 1642 /* SDMA firmware */
1636 const struct firmware *fw; 1643 const struct firmware *fw;
1637 uint32_t fw_version; 1644 uint32_t fw_version;
1645 uint32_t feature_version;
1638 1646
1639 struct amdgpu_ring ring; 1647 struct amdgpu_ring ring;
1640}; 1648};
@@ -1862,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1862typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 1870typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1863typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 1871typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1864 1872
1873struct amdgpu_ip_block_status {
1874 bool valid;
1875 bool sw;
1876 bool hw;
1877};
1878
1865struct amdgpu_device { 1879struct amdgpu_device {
1866 struct device *dev; 1880 struct device *dev;
1867 struct drm_device *ddev; 1881 struct drm_device *ddev;
@@ -2004,7 +2018,7 @@ struct amdgpu_device {
2004 2018
2005 const struct amdgpu_ip_block_version *ip_blocks; 2019 const struct amdgpu_ip_block_version *ip_blocks;
2006 int num_ip_blocks; 2020 int num_ip_blocks;
2007 bool *ip_block_enabled; 2021 struct amdgpu_ip_block_status *ip_block_status;
2008 struct mutex mn_lock; 2022 struct mutex mn_lock;
2009 DECLARE_HASHTABLE(mn_hash, 7); 2023 DECLARE_HASHTABLE(mn_hash, 7);
2010 2024
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d63135bf29c0..1f040d85ac47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
669static int amdgpu_cs_dependencies(struct amdgpu_device *adev, 669static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
670 struct amdgpu_cs_parser *p) 670 struct amdgpu_cs_parser *p)
671{ 671{
672 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
672 struct amdgpu_ib *ib; 673 struct amdgpu_ib *ib;
673 int i, j, r; 674 int i, j, r;
674 675
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
694 for (j = 0; j < num_deps; ++j) { 695 for (j = 0; j < num_deps; ++j) {
695 struct amdgpu_fence *fence; 696 struct amdgpu_fence *fence;
696 struct amdgpu_ring *ring; 697 struct amdgpu_ring *ring;
698 struct amdgpu_ctx *ctx;
697 699
698 r = amdgpu_cs_get_ring(adev, deps[j].ip_type, 700 r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
699 deps[j].ip_instance, 701 deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
701 if (r) 703 if (r)
702 return r; 704 return r;
703 705
706 ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
707 if (ctx == NULL)
708 return -EINVAL;
709
704 r = amdgpu_fence_recreate(ring, p->filp, 710 r = amdgpu_fence_recreate(ring, p->filp,
705 deps[j].handle, 711 deps[j].handle,
706 &fence); 712 &fence);
707 if (r) 713 if (r) {
714 amdgpu_ctx_put(ctx);
708 return r; 715 return r;
716 }
709 717
710 amdgpu_sync_fence(&ib->sync, fence); 718 amdgpu_sync_fence(&ib->sync, fence);
711 amdgpu_fence_unref(&fence); 719 amdgpu_fence_unref(&fence);
720 amdgpu_ctx_put(ctx);
712 } 721 }
713 } 722 }
714 723
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
808 817
809 r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance, 818 r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
810 wait->in.ring, &ring); 819 wait->in.ring, &ring);
811 if (r) 820 if (r) {
821 amdgpu_ctx_put(ctx);
812 return r; 822 return r;
823 }
813 824
814 r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); 825 r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
815 if (r) 826 if (r) {
827 amdgpu_ctx_put(ctx);
816 return r; 828 return r;
829 }
817 830
818 r = fence_wait_timeout(&fence->base, true, timeout); 831 r = fence_wait_timeout(&fence->base, true, timeout);
819 amdgpu_fence_unref(&fence); 832 amdgpu_fence_unref(&fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ba46be361c9b..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1191 return -EINVAL; 1191 return -EINVAL;
1192 } 1192 }
1193 1193
1194 adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); 1194 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1195 if (adev->ip_block_enabled == NULL) 1195 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1196 if (adev->ip_block_status == NULL)
1196 return -ENOMEM; 1197 return -ENOMEM;
1197 1198
1198 if (adev->ip_blocks == NULL) { 1199 if (adev->ip_blocks == NULL) {
@@ -1203,14 +1204,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1203 for (i = 0; i < adev->num_ip_blocks; i++) { 1204 for (i = 0; i < adev->num_ip_blocks; i++) {
1204 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1205 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1205 DRM_ERROR("disabled ip block: %d\n", i); 1206 DRM_ERROR("disabled ip block: %d\n", i);
1206 adev->ip_block_enabled[i] = false; 1207 adev->ip_block_status[i].valid = false;
1207 } else { 1208 } else {
1208 if (adev->ip_blocks[i].funcs->early_init) { 1209 if (adev->ip_blocks[i].funcs->early_init) {
1209 r = adev->ip_blocks[i].funcs->early_init((void *)adev); 1210 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
1210 if (r) 1211 if (r == -ENOENT)
1212 adev->ip_block_status[i].valid = false;
1213 else if (r)
1211 return r; 1214 return r;
1215 else
1216 adev->ip_block_status[i].valid = true;
1217 } else {
1218 adev->ip_block_status[i].valid = true;
1212 } 1219 }
1213 adev->ip_block_enabled[i] = true;
1214 } 1220 }
1215 } 1221 }
1216 1222
@@ -1222,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1222 int i, r; 1228 int i, r;
1223 1229
1224 for (i = 0; i < adev->num_ip_blocks; i++) { 1230 for (i = 0; i < adev->num_ip_blocks; i++) {
1225 if (!adev->ip_block_enabled[i]) 1231 if (!adev->ip_block_status[i].valid)
1226 continue; 1232 continue;
1227 r = adev->ip_blocks[i].funcs->sw_init((void *)adev); 1233 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
1228 if (r) 1234 if (r)
1229 return r; 1235 return r;
1236 adev->ip_block_status[i].sw = true;
1230 /* need to do gmc hw init early so we can allocate gpu mem */ 1237 /* need to do gmc hw init early so we can allocate gpu mem */
1231 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1238 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1232 r = amdgpu_vram_scratch_init(adev); 1239 r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1238 r = amdgpu_wb_init(adev); 1245 r = amdgpu_wb_init(adev);
1239 if (r) 1246 if (r)
1240 return r; 1247 return r;
1248 adev->ip_block_status[i].hw = true;
1241 } 1249 }
1242 } 1250 }
1243 1251
1244 for (i = 0; i < adev->num_ip_blocks; i++) { 1252 for (i = 0; i < adev->num_ip_blocks; i++) {
1245 if (!adev->ip_block_enabled[i]) 1253 if (!adev->ip_block_status[i].sw)
1246 continue; 1254 continue;
1247 /* gmc hw init is done early */ 1255 /* gmc hw init is done early */
1248 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) 1256 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
1250 r = adev->ip_blocks[i].funcs->hw_init((void *)adev); 1258 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1251 if (r) 1259 if (r)
1252 return r; 1260 return r;
1261 adev->ip_block_status[i].hw = true;
1253 } 1262 }
1254 1263
1255 return 0; 1264 return 0;
@@ -1260,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
1260 int i = 0, r; 1269 int i = 0, r;
1261 1270
1262 for (i = 0; i < adev->num_ip_blocks; i++) { 1271 for (i = 0; i < adev->num_ip_blocks; i++) {
1263 if (!adev->ip_block_enabled[i]) 1272 if (!adev->ip_block_status[i].valid)
1264 continue; 1273 continue;
1265 /* enable clockgating to save power */ 1274 /* enable clockgating to save power */
1266 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1275 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1282 int i, r; 1291 int i, r;
1283 1292
1284 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1293 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1285 if (!adev->ip_block_enabled[i]) 1294 if (!adev->ip_block_status[i].hw)
1286 continue; 1295 continue;
1287 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1296 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1288 amdgpu_wb_fini(adev); 1297 amdgpu_wb_fini(adev);
@@ -1295,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1295 return r; 1304 return r;
1296 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); 1305 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
1297 /* XXX handle errors */ 1306 /* XXX handle errors */
1307 adev->ip_block_status[i].hw = false;
1298 } 1308 }
1299 1309
1300 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1310 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1301 if (!adev->ip_block_enabled[i]) 1311 if (!adev->ip_block_status[i].sw)
1302 continue; 1312 continue;
1303 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); 1313 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
1304 /* XXX handle errors */ 1314 /* XXX handle errors */
1305 adev->ip_block_enabled[i] = false; 1315 adev->ip_block_status[i].sw = false;
1316 adev->ip_block_status[i].valid = false;
1306 } 1317 }
1307 1318
1308 return 0; 1319 return 0;
@@ -1313,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
1313 int i, r; 1324 int i, r;
1314 1325
1315 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1326 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1316 if (!adev->ip_block_enabled[i]) 1327 if (!adev->ip_block_status[i].valid)
1317 continue; 1328 continue;
1318 /* ungate blocks so that suspend can properly shut them down */ 1329 /* ungate blocks so that suspend can properly shut them down */
1319 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1330 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1331 int i, r; 1342 int i, r;
1332 1343
1333 for (i = 0; i < adev->num_ip_blocks; i++) { 1344 for (i = 0; i < adev->num_ip_blocks; i++) {
1334 if (!adev->ip_block_enabled[i]) 1345 if (!adev->ip_block_status[i].valid)
1335 continue; 1346 continue;
1336 r = adev->ip_blocks[i].funcs->resume(adev); 1347 r = adev->ip_blocks[i].funcs->resume(adev);
1337 if (r) 1348 if (r)
@@ -1577,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1577 amdgpu_fence_driver_fini(adev); 1588 amdgpu_fence_driver_fini(adev);
1578 amdgpu_fbdev_fini(adev); 1589 amdgpu_fbdev_fini(adev);
1579 r = amdgpu_fini(adev); 1590 r = amdgpu_fini(adev);
1580 kfree(adev->ip_block_enabled); 1591 kfree(adev->ip_block_status);
1581 adev->ip_block_enabled = NULL; 1592 adev->ip_block_status = NULL;
1582 adev->accel_working = false; 1593 adev->accel_working = false;
1583 /* free i2c buses */ 1594 /* free i2c buses */
1584 amdgpu_i2c_fini(adev); 1595 amdgpu_i2c_fini(adev);
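
Note on the amdgpu.h and amdgpu_device.c hunks taken together: the single ip_block_enabled boolean array becomes a per-block {valid, sw, hw} status, so suspend and teardown only undo stages that actually completed, and an early_init that returns -ENOENT simply marks the block invalid instead of failing device init. The staged pattern, reduced to a hedged sketch (names are illustrative, not the driver's):

#include <linux/types.h>

struct demo_block_status {
	bool valid;	/* early_init succeeded (or block has none) */
	bool sw;	/* sw_init completed */
	bool hw;	/* hw_init completed */
};

/* Teardown walks the blocks in reverse and only undoes completed stages. */
static void demo_fini(struct demo_block_status *status, int num_blocks)
{
	int i;

	for (i = num_blocks - 1; i >= 0; i--) {
		if (!status[i].hw)
			continue;
		/* ...hw_fini() for block i... */
		status[i].hw = false;
	}

	for (i = num_blocks - 1; i >= 0; i--) {
		if (!status[i].sw)
			continue;
		/* ...sw_fini() for block i... */
		status[i].sw = false;
		status[i].valid = false;
	}
}
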
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 975edb1000a2..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
352 if (((int64_t)timeout_ns) < 0) 352 if (((int64_t)timeout_ns) < 0)
353 return MAX_SCHEDULE_TIMEOUT; 353 return MAX_SCHEDULE_TIMEOUT;
354 354
355 timeout = ktime_sub_ns(ktime_get(), timeout_ns); 355 timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
356 if (ktime_to_ns(timeout) < 0) 356 if (ktime_to_ns(timeout) < 0)
357 return 0; 357 return 0;
358 358
@@ -449,7 +449,7 @@ out:
449 * vital here, so they are not reported back to userspace. 449 * vital here, so they are not reported back to userspace.
450 */ 450 */
451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, 451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
452 struct amdgpu_bo_va *bo_va) 452 struct amdgpu_bo_va *bo_va, uint32_t operation)
453{ 453{
454 struct ttm_validate_buffer tv, *entry; 454 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 455 struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
485 if (r) 485 if (r)
486 goto error_unlock; 486 goto error_unlock;
487 487
488 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); 488
489 if (operation == AMDGPU_VA_OP_MAP)
490 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
489 491
490error_unlock: 492error_unlock:
491 mutex_unlock(&bo_va->vm->mutex); 493 mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
580 } 582 }
581 583
582 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 584 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
583 amdgpu_gem_va_update_vm(adev, bo_va); 585 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
584 586
585 drm_gem_object_unreference_unlocked(gobj); 587 drm_gem_object_unreference_unlocked(gobj);
586 return r; 588 return r;
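
Note on the amdgpu_gem.c hunks: the wait-timeout conversion is fixed — userspace passes an absolute deadline in nanoseconds, and the old code computed "now minus deadline" instead of the time remaining — and the VA update path now only flushes the mapping for map operations. The corrected timeout computation, sketched as a standalone helper (the function name is made up; the logic mirrors amdgpu_gem_timeout()):

#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static unsigned long demo_abs_timeout_to_jiffies(uint64_t timeout_ns)
{
	ktime_t remaining;

	/* A negative value is treated as "wait forever". */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	remaining = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(remaining) < 0)
		return 0;	/* deadline already passed */

	return nsecs_to_jiffies(ktime_to_ns(remaining));
}
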
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
180 if (vm) { 180 if (vm) {
181 /* do context switch */ 181 /* do context switch */
182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); 182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
183 }
184 183
185 if (vm && ring->funcs->emit_gds_switch) 184 if (ring->funcs->emit_gds_switch)
186 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, 185 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
187 ib->gds_base, ib->gds_size, 186 ib->gds_base, ib->gds_size,
188 ib->gws_base, ib->gws_size, 187 ib->gws_base, ib->gws_size,
189 ib->oa_base, ib->oa_size); 188 ib->oa_base, ib->oa_size);
190 189
191 if (ring->funcs->emit_hdp_flush) 190 if (ring->funcs->emit_hdp_flush)
192 amdgpu_ring_emit_hdp_flush(ring); 191 amdgpu_ring_emit_hdp_flush(ring);
192 }
193 193
194 old_ctx = ring->current_ctx; 194 old_ctx = ring->current_ctx;
195 for (i = 0; i < num_ibs; ++i) { 195 for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..3bfe67de8349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
235 235
236 for (i = 0; i < adev->num_ip_blocks; i++) { 236 for (i = 0; i < adev->num_ip_blocks; i++) {
237 if (adev->ip_blocks[i].type == type && 237 if (adev->ip_blocks[i].type == type &&
238 adev->ip_block_enabled[i]) { 238 adev->ip_block_status[i].valid) {
239 ip.hw_ip_version_major = adev->ip_blocks[i].major; 239 ip.hw_ip_version_major = adev->ip_blocks[i].major;
240 ip.hw_ip_version_minor = adev->ip_blocks[i].minor; 240 ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
241 ip.capabilities_flags = 0; 241 ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
274 274
275 for (i = 0; i < adev->num_ip_blocks; i++) 275 for (i = 0; i < adev->num_ip_blocks; i++)
276 if (adev->ip_blocks[i].type == type && 276 if (adev->ip_blocks[i].type == type &&
277 adev->ip_block_enabled[i] && 277 adev->ip_block_status[i].valid &&
278 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) 278 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
279 count++; 279 count++;
280 280
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
317 break; 317 break;
318 case AMDGPU_INFO_FW_GFX_RLC: 318 case AMDGPU_INFO_FW_GFX_RLC:
319 fw_info.ver = adev->gfx.rlc_fw_version; 319 fw_info.ver = adev->gfx.rlc_fw_version;
320 fw_info.feature = 0; 320 fw_info.feature = adev->gfx.rlc_feature_version;
321 break; 321 break;
322 case AMDGPU_INFO_FW_GFX_MEC: 322 case AMDGPU_INFO_FW_GFX_MEC:
323 if (info->query_fw.index == 0) 323 if (info->query_fw.index == 0) {
324 fw_info.ver = adev->gfx.mec_fw_version; 324 fw_info.ver = adev->gfx.mec_fw_version;
325 else if (info->query_fw.index == 1) 325 fw_info.feature = adev->gfx.mec_feature_version;
326 } else if (info->query_fw.index == 1) {
326 fw_info.ver = adev->gfx.mec2_fw_version; 327 fw_info.ver = adev->gfx.mec2_fw_version;
327 else 328 fw_info.feature = adev->gfx.mec2_feature_version;
329 } else
328 return -EINVAL; 330 return -EINVAL;
329 fw_info.feature = 0;
330 break; 331 break;
331 case AMDGPU_INFO_FW_SMC: 332 case AMDGPU_INFO_FW_SMC:
332 fw_info.ver = adev->pm.fw_version; 333 fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
336 if (info->query_fw.index >= 2) 337 if (info->query_fw.index >= 2)
337 return -EINVAL; 338 return -EINVAL;
338 fw_info.ver = adev->sdma[info->query_fw.index].fw_version; 339 fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
339 fw_info.feature = 0; 340 fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
340 break; 341 break;
341 default: 342 default:
342 return -EINVAL; 343 return -EINVAL;
@@ -416,7 +417,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
416 return n ? -EFAULT : 0; 417 return n ? -EFAULT : 0;
417 } 418 }
418 case AMDGPU_INFO_DEV_INFO: { 419 case AMDGPU_INFO_DEV_INFO: {
419 struct drm_amdgpu_info_device dev_info; 420 struct drm_amdgpu_info_device dev_info = {};
420 struct amdgpu_cu_info cu_info; 421 struct amdgpu_cu_info cu_info;
421 422
422 dev_info.device_id = dev->pdev->device; 423 dev_info.device_id = dev->pdev->device;
@@ -459,6 +460,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
459 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); 460 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
460 dev_info.vram_type = adev->mc.vram_type; 461 dev_info.vram_type = adev->mc.vram_type;
461 dev_info.vram_bit_width = adev->mc.vram_width; 462 dev_info.vram_bit_width = adev->mc.vram_width;
463 dev_info.vce_harvest_config = adev->vce.harvest_config;
462 464
463 return copy_to_user(out, &dev_info, 465 return copy_to_user(out, &dev_info,
464 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; 466 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
374 unsigned height_in_mb = ALIGN(height / 16, 2); 374 unsigned height_in_mb = ALIGN(height / 16, 2);
375 unsigned fs_in_mb = width_in_mb * height_in_mb; 375 unsigned fs_in_mb = width_in_mb * height_in_mb;
376 376
377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; 377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
378 378
379 image_size = width * height; 379 image_size = width * height;
380 image_size += image_size / 2; 380 image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
466 466
467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; 467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
468 min_dpb_size = image_size * num_dpb_buffer; 468 min_dpb_size = image_size * num_dpb_buffer;
469 min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
470 * 16 * num_dpb_buffer + 52 * 1024;
469 break; 471 break;
470 472
471 default: 473 default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
486 488
487 buf_sizes[0x1] = dpb_size; 489 buf_sizes[0x1] = dpb_size;
488 buf_sizes[0x2] = image_size; 490 buf_sizes[0x2] = image_size;
491 buf_sizes[0x4] = min_ctx_size;
489 return 0; 492 return 0;
490} 493}
491 494
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
628 return -EINVAL; 631 return -EINVAL;
629 } 632 }
630 633
634 } else if (cmd == 0x206) {
635 if ((end - start) < ctx->buf_sizes[4]) {
636 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
637 (unsigned)(end - start),
638 ctx->buf_sizes[4]);
639 return -EINVAL;
640 }
631 } else if ((cmd != 0x100) && (cmd != 0x204)) { 641 } else if ((cmd != 0x100) && (cmd != 0x204)) {
632 DRM_ERROR("invalid UVD command %X!\n", cmd); 642 DRM_ERROR("invalid UVD command %X!\n", cmd);
633 return -EINVAL; 643 return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
755 struct amdgpu_uvd_cs_ctx ctx = {}; 765 struct amdgpu_uvd_cs_ctx ctx = {};
756 unsigned buf_sizes[] = { 766 unsigned buf_sizes[] = {
757 [0x00000000] = 2048, 767 [0x00000000] = 2048,
758 [0x00000001] = 32 * 1024 * 1024, 768 [0x00000001] = 0xFFFFFFFF,
759 [0x00000002] = 2048 * 1152 * 3, 769 [0x00000002] = 0xFFFFFFFF,
760 [0x00000003] = 2048, 770 [0x00000003] = 2048,
771 [0x00000004] = 0xFFFFFFFF,
761 }; 772 };
762 struct amdgpu_ib *ib = &parser->ibs[ib_idx]; 773 struct amdgpu_ib *ib = &parser->ibs[ib_idx];
763 int r; 774 int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
500 amdgpu_ucode_print_sdma_hdr(&hdr->header); 500 amdgpu_ucode_print_sdma_hdr(&hdr->header);
501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
503 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
503 fw_data = (const __le32 *) 504 fw_data = (const __le32 *)
504 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 505 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
505 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 506 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f75a31df30bd..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
494 amdgpu_free_extended_power_table(adev); 494 amdgpu_free_extended_power_table(adev);
495} 495}
496 496
497#define ixSMUSVI_NB_CURRENTVID 0xD8230044
498#define CURRENT_NB_VID_MASK 0xff000000
499#define CURRENT_NB_VID__SHIFT 24
500#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
501#define CURRENT_GFX_VID_MASK 0xff000000
502#define CURRENT_GFX_VID__SHIFT 24
503
497static void 504static void
498cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 505cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
499 struct seq_file *m) 506 struct seq_file *m)
500{ 507{
508 struct cz_power_info *pi = cz_get_pi(adev);
501 struct amdgpu_clock_voltage_dependency_table *table = 509 struct amdgpu_clock_voltage_dependency_table *table =
502 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 510 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
503 u32 current_index = 511 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
504 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 512 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
505 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 513 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
506 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 514 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
507 u32 sclk, tmp; 515 u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
508 u16 vddc; 516 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
509 517 u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
510 if (current_index >= NUM_SCLK_LEVELS) { 518 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
511 seq_printf(m, "invalid dpm profile %d\n", current_index); 519 u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
520 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
521 u32 sclk, vclk, dclk, ecclk, tmp;
522 u16 vddnb, vddgfx;
523
524 if (sclk_index >= NUM_SCLK_LEVELS) {
525 seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
512 } else { 526 } else {
513 sclk = table->entries[current_index].clk; 527 sclk = table->entries[sclk_index].clk;
514 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 528 seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
515 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 529 }
516 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 530
517 vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 531 tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
518 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 532 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
519 current_index, sclk, vddc); 533 vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
534 tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
535 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
536 vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
537 seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
538
539 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
540 if (!pi->uvd_power_gated) {
541 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
542 seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
543 } else {
544 vclk = uvd_table->entries[uvd_index].vclk;
545 dclk = uvd_table->entries[uvd_index].dclk;
546 seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
547 }
548 }
549
550 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
551 if (!pi->vce_power_gated) {
552 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
553 seq_printf(m, "invalid vce dpm level %d\n", vce_index);
554 } else {
555 ecclk = vce_table->entries[vce_index].ecclk;
556 seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
557 }
520 } 558 }
521} 559}
522 560
@@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
1679 if (ret) 1717 if (ret)
1680 return ret; 1718 return ret;
1681 1719
1682 DRM_INFO("DPM unforce state min=%d, max=%d.\n", 1720 DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
1683 pi->sclk_dpm.soft_min_clk, 1721 pi->sclk_dpm.soft_min_clk,
1684 pi->sclk_dpm.soft_max_clk); 1722 pi->sclk_dpm.soft_max_clk);
1685 1723
1686 return 0; 1724 return 0;
1687} 1725}
1688 1726
1689static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, 1727static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1690 enum amdgpu_dpm_forced_level level) 1728 enum amdgpu_dpm_forced_level level)
1691{ 1729{
1692 int ret = 0; 1730 int ret = 0;
1693 1731
1694 switch (level) { 1732 switch (level) {
1695 case AMDGPU_DPM_FORCED_LEVEL_HIGH: 1733 case AMDGPU_DPM_FORCED_LEVEL_HIGH:
1734 ret = cz_dpm_unforce_dpm_levels(adev);
1735 if (ret)
1736 return ret;
1696 ret = cz_dpm_force_highest(adev); 1737 ret = cz_dpm_force_highest(adev);
1697 if (ret) 1738 if (ret)
1698 return ret; 1739 return ret;
1699 break; 1740 break;
1700 case AMDGPU_DPM_FORCED_LEVEL_LOW: 1741 case AMDGPU_DPM_FORCED_LEVEL_LOW:
1742 ret = cz_dpm_unforce_dpm_levels(adev);
1743 if (ret)
1744 return ret;
1701 ret = cz_dpm_force_lowest(adev); 1745 ret = cz_dpm_force_lowest(adev);
1702 if (ret) 1746 if (ret)
1703 return ret; 1747 return ret;
@@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
1711 break; 1755 break;
1712 } 1756 }
1713 1757
1758 adev->pm.dpm.forced_level = level;
1759
1714 return ret; 1760 return ret;
1715} 1761}
1716 1762
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2632 struct drm_device *dev = crtc->dev; 2632 struct drm_device *dev = crtc->dev;
2633 struct amdgpu_device *adev = dev->dev_private; 2633 struct amdgpu_device *adev = dev->dev_private;
2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2635 unsigned type;
2635 2636
2636 switch (mode) { 2637 switch (mode) {
2637 case DRM_MODE_DPMS_ON: 2638 case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2640 dce_v10_0_vga_enable(crtc, true); 2641 dce_v10_0_vga_enable(crtc, true);
2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2642 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2642 dce_v10_0_vga_enable(crtc, false); 2643 dce_v10_0_vga_enable(crtc, false);
2644 /* Make sure VBLANK interrupt is still enabled */
2645 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2646 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2643 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2647 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2644 dce_v10_0_crtc_load_lut(crtc); 2648 dce_v10_0_crtc_load_lut(crtc);
2645 break; 2649 break;
@@ -3403,19 +3407,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
3403 3407
3404 switch (entry->src_data) { 3408 switch (entry->src_data) {
3405 case 0: /* vblank */ 3409 case 0: /* vblank */
3406 if (disp_int & interrupt_status_offsets[crtc].vblank) { 3410 if (disp_int & interrupt_status_offsets[crtc].vblank)
3407 dce_v10_0_crtc_vblank_int_ack(adev, crtc); 3411 dce_v10_0_crtc_vblank_int_ack(adev, crtc);
3408 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3412 else
3409 drm_handle_vblank(adev->ddev, crtc); 3413 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3410 } 3414
3411 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3415 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3416 drm_handle_vblank(adev->ddev, crtc);
3412 } 3417 }
3418 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3419
3413 break; 3420 break;
3414 case 1: /* vline */ 3421 case 1: /* vline */
3415 if (disp_int & interrupt_status_offsets[crtc].vline) { 3422 if (disp_int & interrupt_status_offsets[crtc].vline)
3416 dce_v10_0_crtc_vline_int_ack(adev, crtc); 3423 dce_v10_0_crtc_vline_int_ack(adev, crtc);
3417 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3424 else
3418 } 3425 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3426
3427 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3428
3419 break; 3429 break;
3420 default: 3430 default:
3421 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3431 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2631 struct drm_device *dev = crtc->dev; 2631 struct drm_device *dev = crtc->dev;
2632 struct amdgpu_device *adev = dev->dev_private; 2632 struct amdgpu_device *adev = dev->dev_private;
2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2634 unsigned type;
2634 2635
2635 switch (mode) { 2636 switch (mode) {
2636 case DRM_MODE_DPMS_ON: 2637 case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2639 dce_v11_0_vga_enable(crtc, true); 2640 dce_v11_0_vga_enable(crtc, true);
2640 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2641 dce_v11_0_vga_enable(crtc, false); 2642 dce_v11_0_vga_enable(crtc, false);
2643 /* Make sure VBLANK interrupt is still enabled */
2644 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2645 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2642 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2646 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2643 dce_v11_0_crtc_load_lut(crtc); 2647 dce_v11_0_crtc_load_lut(crtc);
2644 break; 2648 break;
@@ -3402,19 +3406,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3402 3406
3403 switch (entry->src_data) { 3407 switch (entry->src_data) {
3404 case 0: /* vblank */ 3408 case 0: /* vblank */
3405 if (disp_int & interrupt_status_offsets[crtc].vblank) { 3409 if (disp_int & interrupt_status_offsets[crtc].vblank)
3406 dce_v11_0_crtc_vblank_int_ack(adev, crtc); 3410 dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3407 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3411 else
3408 drm_handle_vblank(adev->ddev, crtc); 3412 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3409 } 3413
3410 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3414 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3415 drm_handle_vblank(adev->ddev, crtc);
3411 } 3416 }
3417 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3418
3412 break; 3419 break;
3413 case 1: /* vline */ 3420 case 1: /* vline */
3414 if (disp_int & interrupt_status_offsets[crtc].vline) { 3421 if (disp_int & interrupt_status_offsets[crtc].vline)
3415 dce_v11_0_crtc_vline_int_ack(adev, crtc); 3422 dce_v11_0_crtc_vline_int_ack(adev, crtc);
3416 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3423 else
3417 } 3424 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3425
3426 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3427
3418 break; 3428 break;
3419 default: 3429 default:
3420 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3430 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index aaca8d663f2c..cc050a329c49 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2566 struct drm_device *dev = crtc->dev; 2566 struct drm_device *dev = crtc->dev;
2567 struct amdgpu_device *adev = dev->dev_private; 2567 struct amdgpu_device *adev = dev->dev_private;
2568 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2568 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2569 unsigned type;
2569 2570
2570 switch (mode) { 2571 switch (mode) {
2571 case DRM_MODE_DPMS_ON: 2572 case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2574 dce_v8_0_vga_enable(crtc, true); 2575 dce_v8_0_vga_enable(crtc, true);
2575 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2576 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2576 dce_v8_0_vga_enable(crtc, false); 2577 dce_v8_0_vga_enable(crtc, false);
2578 /* Make sure VBLANK interrupt is still enabled */
2579 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2580 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2577 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2581 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2578 dce_v8_0_crtc_load_lut(crtc); 2582 dce_v8_0_crtc_load_lut(crtc);
2579 break; 2583 break;
@@ -3237,19 +3241,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3237 3241
3238 switch (entry->src_data) { 3242 switch (entry->src_data) {
3239 case 0: /* vblank */ 3243 case 0: /* vblank */
3240 if (disp_int & interrupt_status_offsets[crtc].vblank) { 3244 if (disp_int & interrupt_status_offsets[crtc].vblank)
3241 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); 3245 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3242 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3246 else
3243 drm_handle_vblank(adev->ddev, crtc); 3247 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3244 } 3248
3245 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3249 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3250 drm_handle_vblank(adev->ddev, crtc);
3246 } 3251 }
3252 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3253
3247 break; 3254 break;
3248 case 1: /* vline */ 3255 case 1: /* vline */
3249 if (disp_int & interrupt_status_offsets[crtc].vline) { 3256 if (disp_int & interrupt_status_offsets[crtc].vline)
3250 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); 3257 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3251 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3258 else
3252 } 3259 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3260
3261 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3262
3253 break; 3263 break;
3254 default: 3264 default:
3255 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3265 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
2561 * sheduling on the ring. This function schedules the IB 2561 * sheduling on the ring. This function schedules the IB
2562 * on the gfx ring for execution by the GPU. 2562 * on the gfx ring for execution by the GPU.
2563 */ 2563 */
2564static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, 2564static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2565 struct amdgpu_ib *ib) 2565 struct amdgpu_ib *ib)
2566{ 2566{
2567 bool need_ctx_switch = ring->current_ctx != ib->ctx; 2567 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2569 u32 next_rptr = ring->wptr + 5; 2569 u32 next_rptr = ring->wptr + 5;
2570 2570
2571 /* drop the CE preamble IB for the same context */ 2571 /* drop the CE preamble IB for the same context */
2572 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 2572 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
2573 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
2574 !need_ctx_switch)
2575 return; 2573 return;
2576 2574
2577 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 2575 if (need_ctx_switch)
2578 control |= INDIRECT_BUFFER_VALID;
2579
2580 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
2581 next_rptr += 2; 2576 next_rptr += 2;
2582 2577
2583 next_rptr += 4; 2578 next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2588 amdgpu_ring_write(ring, next_rptr); 2583 amdgpu_ring_write(ring, next_rptr);
2589 2584
2590 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2585 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2591 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 2586 if (need_ctx_switch) {
2592 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2587 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2593 amdgpu_ring_write(ring, 0); 2588 amdgpu_ring_write(ring, 0);
2594 } 2589 }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2611 amdgpu_ring_write(ring, control); 2606 amdgpu_ring_write(ring, control);
2612} 2607}
2613 2608
2609static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2610 struct amdgpu_ib *ib)
2611{
2612 u32 header, control = 0;
2613 u32 next_rptr = ring->wptr + 5;
2614
2615 control |= INDIRECT_BUFFER_VALID;
2616 next_rptr += 4;
2617 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2618 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2619 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2620 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2621 amdgpu_ring_write(ring, next_rptr);
2622
2623 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2624
2625 control |= ib->length_dw |
2626 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
2627
2628 amdgpu_ring_write(ring, header);
2629 amdgpu_ring_write(ring,
2630#ifdef __BIG_ENDIAN
2631 (2 << 0) |
2632#endif
2633 (ib->gpu_addr & 0xFFFFFFFC));
2634 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2635 amdgpu_ring_write(ring, control);
2636}
2637
2614/** 2638/**
2615 * gfx_v7_0_ring_test_ib - basic ring IB test 2639 * gfx_v7_0_ring_test_ib - basic ring IB test
2616 * 2640 *
@@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3056 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3080 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3057 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3081 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3058 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); 3082 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
3083 adev->gfx.mec_feature_version = le32_to_cpu(
3084 mec_hdr->ucode_feature_version);
3059 3085
3060 gfx_v7_0_cp_compute_enable(adev, false); 3086 gfx_v7_0_cp_compute_enable(adev, false);
3061 3087
@@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3078 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 3104 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3079 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 3105 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
3080 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); 3106 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
3107 adev->gfx.mec2_feature_version = le32_to_cpu(
3108 mec2_hdr->ucode_feature_version);
3081 3109
3082 /* MEC2 */ 3110 /* MEC2 */
3083 fw_data = (const __le32 *) 3111 fw_data = (const __le32 *)
@@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
4042 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; 4070 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
4043 amdgpu_ucode_print_rlc_hdr(&hdr->header); 4071 amdgpu_ucode_print_rlc_hdr(&hdr->header);
4044 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); 4072 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
4073 adev->gfx.rlc_feature_version = le32_to_cpu(
4074 hdr->ucode_feature_version);
4045 4075
4046 gfx_v7_0_rlc_stop(adev); 4076 gfx_v7_0_rlc_stop(adev);
4047 4077
@@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
5098 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", 5128 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
5099 RREG32(mmCP_HPD_EOP_CONTROL)); 5129 RREG32(mmCP_HPD_EOP_CONTROL));
5100 5130
5101 for (queue = 0; queue < 8; i++) { 5131 for (queue = 0; queue < 8; queue++) {
5102 cik_srbm_select(adev, me, pipe, queue, 0); 5132 cik_srbm_select(adev, me, pipe, queue, 0);
5103 dev_info(adev->dev, " queue: %d\n", queue); 5133 dev_info(adev->dev, " queue: %d\n", queue);
5104 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", 5134 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5555 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 5585 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5556 .set_wptr = gfx_v7_0_ring_set_wptr_gfx, 5586 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5557 .parse_cs = NULL, 5587 .parse_cs = NULL,
5558 .emit_ib = gfx_v7_0_ring_emit_ib, 5588 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5559 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 5589 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5560 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5590 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5561 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5591 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5571 .get_wptr = gfx_v7_0_ring_get_wptr_compute, 5601 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5572 .set_wptr = gfx_v7_0_ring_set_wptr_compute, 5602 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5573 .parse_cs = NULL, 5603 .parse_cs = NULL,
5574 .emit_ib = gfx_v7_0_ring_emit_ib, 5604 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5575 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 5605 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5576 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5606 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5577 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5607 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 7b683fb2173c..20e2cfd521d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
587 int err; 587 int err;
588 struct amdgpu_firmware_info *info = NULL; 588 struct amdgpu_firmware_info *info = NULL;
589 const struct common_firmware_header *header = NULL; 589 const struct common_firmware_header *header = NULL;
590 const struct gfx_firmware_header_v1_0 *cp_hdr;
590 591
591 DRM_DEBUG("\n"); 592 DRM_DEBUG("\n");
592 593
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
611 err = amdgpu_ucode_validate(adev->gfx.pfp_fw); 612 err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
612 if (err) 613 if (err)
613 goto out; 614 goto out;
615 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
616 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
617 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
614 618
615 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); 619 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
616 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 620 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
619 err = amdgpu_ucode_validate(adev->gfx.me_fw); 623 err = amdgpu_ucode_validate(adev->gfx.me_fw);
620 if (err) 624 if (err)
621 goto out; 625 goto out;
626 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
627 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
628 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
622 629
623 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); 630 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
624 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); 631 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
627 err = amdgpu_ucode_validate(adev->gfx.ce_fw); 634 err = amdgpu_ucode_validate(adev->gfx.ce_fw);
628 if (err) 635 if (err)
629 goto out; 636 goto out;
637 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
638 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
639 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
630 640
631 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); 641 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
632 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 642 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
633 if (err) 643 if (err)
634 goto out; 644 goto out;
635 err = amdgpu_ucode_validate(adev->gfx.rlc_fw); 645 err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
646 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
647 adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
648 adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
636 649
637 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); 650 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
638 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 651 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
641 err = amdgpu_ucode_validate(adev->gfx.mec_fw); 654 err = amdgpu_ucode_validate(adev->gfx.mec_fw);
642 if (err) 655 if (err)
643 goto out; 656 goto out;
657 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
658 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
659 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
644 660
645 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 661 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
646 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 662 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
648 err = amdgpu_ucode_validate(adev->gfx.mec2_fw); 664 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
649 if (err) 665 if (err)
650 goto out; 666 goto out;
667 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
668 adev->gfx.mec2_fw->data;
669 adev->gfx.mec2_fw_version = le32_to_cpu(
670 cp_hdr->header.ucode_version);
671 adev->gfx.mec2_feature_version = le32_to_cpu(
672 cp_hdr->ucode_feature_version);
651 } else { 673 } else {
652 err = 0; 674 err = 0;
653 adev->gfx.mec2_fw = NULL; 675 adev->gfx.mec2_fw = NULL;
@@ -1813,10 +1835,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
1813 u32 data, mask; 1835 u32 data, mask;
1814 1836
1815 data = RREG32(mmCC_RB_BACKEND_DISABLE); 1837 data = RREG32(mmCC_RB_BACKEND_DISABLE);
1816 if (data & 1) 1838 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1817 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1818 else
1819 data = 0;
1820 1839
1821 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1840 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
1822 1841
@@ -1986,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1986 adev->gfx.config.max_shader_engines = 1; 2005 adev->gfx.config.max_shader_engines = 1;
1987 adev->gfx.config.max_tile_pipes = 2; 2006 adev->gfx.config.max_tile_pipes = 2;
1988 adev->gfx.config.max_sh_per_se = 1; 2007 adev->gfx.config.max_sh_per_se = 1;
2008 adev->gfx.config.max_backends_per_se = 2;
1989 2009
1990 switch (adev->pdev->revision) { 2010 switch (adev->pdev->revision) {
1991 case 0xc4: 2011 case 0xc4:
@@ -1994,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1994 case 0xcc: 2014 case 0xcc:
1995 /* B10 */ 2015 /* B10 */
1996 adev->gfx.config.max_cu_per_sh = 8; 2016 adev->gfx.config.max_cu_per_sh = 8;
1997 adev->gfx.config.max_backends_per_se = 2;
1998 break; 2017 break;
1999 case 0xc5: 2018 case 0xc5:
2000 case 0x81: 2019 case 0x81:
@@ -2003,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2003 case 0xcd: 2022 case 0xcd:
2004 /* B8 */ 2023 /* B8 */
2005 adev->gfx.config.max_cu_per_sh = 6; 2024 adev->gfx.config.max_cu_per_sh = 6;
2006 adev->gfx.config.max_backends_per_se = 2;
2007 break; 2025 break;
2008 case 0xc6: 2026 case 0xc6:
2009 case 0xca: 2027 case 0xca:
2010 case 0xce: 2028 case 0xce:
2011 /* B6 */ 2029 /* B6 */
2012 adev->gfx.config.max_cu_per_sh = 6; 2030 adev->gfx.config.max_cu_per_sh = 6;
2013 adev->gfx.config.max_backends_per_se = 2;
2014 break; 2031 break;
2015 case 0xc7: 2032 case 0xc7:
2016 case 0x87: 2033 case 0x87:
@@ -2018,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2018 default: 2035 default:
2019 /* B4 */ 2036 /* B4 */
2020 adev->gfx.config.max_cu_per_sh = 4; 2037 adev->gfx.config.max_cu_per_sh = 4;
2021 adev->gfx.config.max_backends_per_se = 1;
2022 break; 2038 break;
2023 } 2039 }
2024 2040
@@ -2278,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
2278 2294
2279 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2295 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2280 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2296 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2281 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
2282 2297
2283 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2298 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2284 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2299 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2364,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2364 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2379 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2365 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2380 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2366 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2381 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2367 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2368 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2369 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2370 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2371 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2372 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2373 2382
2374 gfx_v8_0_cp_gfx_enable(adev, false); 2383 gfx_v8_0_cp_gfx_enable(adev, false);
2375 2384
@@ -2625,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2625 2634
2626 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2635 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2627 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2636 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2628 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2629 2637
2630 fw_data = (const __le32 *) 2638 fw_data = (const __le32 *)
2631 (adev->gfx.mec_fw->data + 2639 (adev->gfx.mec_fw->data +
@@ -2644,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2644 2652
2645 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 2653 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2646 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 2654 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2647 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2648 2655
2649 fw_data = (const __le32 *) 2656 fw_data = (const __le32 *)
2650 (adev->gfx.mec2_fw->data + 2657 (adev->gfx.mec2_fw->data +
@@ -3128,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3128 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, 3135 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3129 AMDGPU_DOORBELL_KIQ << 2); 3136 AMDGPU_DOORBELL_KIQ << 2);
3130 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, 3137 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
3131 0x7FFFF << 2); 3138 AMDGPU_DOORBELL_MEC_RING7 << 2);
3132 } 3139 }
3133 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 3140 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3134 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3141 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3756,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3756 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3763 amdgpu_ring_write(ring, 0x20); /* poll interval */
3757} 3764}
3758 3765
3759static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, 3766static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3760 struct amdgpu_ib *ib) 3767 struct amdgpu_ib *ib)
3761{ 3768{
3762 bool need_ctx_switch = ring->current_ctx != ib->ctx; 3769 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3764,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3764 u32 next_rptr = ring->wptr + 5; 3771 u32 next_rptr = ring->wptr + 5;
3765 3772
3766 /* drop the CE preamble IB for the same context */ 3773 /* drop the CE preamble IB for the same context */
3767 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 3774 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
3768 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
3769 !need_ctx_switch)
3770 return; 3775 return;
3771 3776
3772 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 3777 if (need_ctx_switch)
3773 control |= INDIRECT_BUFFER_VALID;
3774
3775 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
3776 next_rptr += 2; 3778 next_rptr += 2;
3777 3779
3778 next_rptr += 4; 3780 next_rptr += 4;
@@ -3783,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3783 amdgpu_ring_write(ring, next_rptr); 3785 amdgpu_ring_write(ring, next_rptr);
3784 3786
3785 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 3787 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
3786 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 3788 if (need_ctx_switch) {
3787 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3789 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3788 amdgpu_ring_write(ring, 0); 3790 amdgpu_ring_write(ring, 0);
3789 } 3791 }
@@ -3806,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3806 amdgpu_ring_write(ring, control); 3808 amdgpu_ring_write(ring, control);
3807} 3809}
3808 3810
3811static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3812 struct amdgpu_ib *ib)
3813{
3814 u32 header, control = 0;
3815 u32 next_rptr = ring->wptr + 5;
3816
3817 control |= INDIRECT_BUFFER_VALID;
3818
3819 next_rptr += 4;
3820 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3821 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
3822 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3823 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3824 amdgpu_ring_write(ring, next_rptr);
3825
3826 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3827
3828 control |= ib->length_dw |
3829 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
3830
3831 amdgpu_ring_write(ring, header);
3832 amdgpu_ring_write(ring,
3833#ifdef __BIG_ENDIAN
3834 (2 << 0) |
3835#endif
3836 (ib->gpu_addr & 0xFFFFFFFC));
3837 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3838 amdgpu_ring_write(ring, control);
3839}
3840
3809static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 3841static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3810 u64 seq, unsigned flags) 3842 u64 seq, unsigned flags)
3811{ 3843{
@@ -4227,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
4227 .get_wptr = gfx_v8_0_ring_get_wptr_gfx, 4259 .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
4228 .set_wptr = gfx_v8_0_ring_set_wptr_gfx, 4260 .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
4229 .parse_cs = NULL, 4261 .parse_cs = NULL,
4230 .emit_ib = gfx_v8_0_ring_emit_ib, 4262 .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
4231 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 4263 .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
4232 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4264 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4233 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4265 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4243,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
4243 .get_wptr = gfx_v8_0_ring_get_wptr_compute, 4275 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
4244 .set_wptr = gfx_v8_0_ring_set_wptr_compute, 4276 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
4245 .parse_cs = NULL, 4277 .parse_cs = NULL,
4246 .emit_ib = gfx_v8_0_ring_emit_ib, 4278 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
4247 .emit_fence = gfx_v8_0_ring_emit_fence_compute, 4279 .emit_fence = gfx_v8_0_ring_emit_fence_compute,
4248 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4280 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4249 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4281 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
121 int err, i; 121 int err, i;
122 struct amdgpu_firmware_info *info = NULL; 122 struct amdgpu_firmware_info *info = NULL;
123 const struct common_firmware_header *header = NULL; 123 const struct common_firmware_header *header = NULL;
124 const struct sdma_firmware_header_v1_0 *hdr;
124 125
125 DRM_DEBUG("\n"); 126 DRM_DEBUG("\n");
126 127
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
142 err = amdgpu_ucode_validate(adev->sdma[i].fw); 143 err = amdgpu_ucode_validate(adev->sdma[i].fw);
143 if (err) 144 if (err)
144 goto out; 145 goto out;
146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
147 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
148 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
145 149
146 if (adev->firmware.smu_load) { 150 if (adev->firmware.smu_load) {
147 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 151 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
541 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 545 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
542 amdgpu_ucode_print_sdma_hdr(&hdr->header); 546 amdgpu_ucode_print_sdma_hdr(&hdr->header);
543 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 547 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
544 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
545
546 fw_data = (const __le32 *) 548 fw_data = (const __le32 *)
547 (adev->sdma[i].fw->data + 549 (adev->sdma[i].fw->data +
548 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 550 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
159 int err, i; 159 int err, i;
160 struct amdgpu_firmware_info *info = NULL; 160 struct amdgpu_firmware_info *info = NULL;
161 const struct common_firmware_header *header = NULL; 161 const struct common_firmware_header *header = NULL;
162 const struct sdma_firmware_header_v1_0 *hdr;
162 163
163 DRM_DEBUG("\n"); 164 DRM_DEBUG("\n");
164 165
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
183 err = amdgpu_ucode_validate(adev->sdma[i].fw); 184 err = amdgpu_ucode_validate(adev->sdma[i].fw);
184 if (err) 185 if (err)
185 goto out; 186 goto out;
187 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
188 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
189 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
186 190
187 if (adev->firmware.smu_load) { 191 if (adev->firmware.smu_load) {
188 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 192 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
630 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 634 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
631 amdgpu_ucode_print_sdma_hdr(&hdr->header); 635 amdgpu_ucode_print_sdma_hdr(&hdr->header);
632 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 636 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
633 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
634
635 fw_data = (const __le32 *) 637 fw_data = (const __le32 *)
636 (adev->sdma[i].fw->data + 638 (adev->sdma[i].fw->data +
637 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 639 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
35#include "oss/oss_2_0_d.h" 35#include "oss/oss_2_0_d.h"
36#include "oss/oss_2_0_sh_mask.h" 36#include "oss/oss_2_0_sh_mask.h"
37#include "gca/gfx_8_0_d.h" 37#include "gca/gfx_8_0_d.h"
38#include "smu/smu_7_1_2_d.h"
39#include "smu/smu_7_1_2_sh_mask.h"
38 40
39#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 41#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
40#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 42#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
112 114
113 mutex_lock(&adev->grbm_idx_mutex); 115 mutex_lock(&adev->grbm_idx_mutex);
114 for (idx = 0; idx < 2; ++idx) { 116 for (idx = 0; idx < 2; ++idx) {
117
118 if (adev->vce.harvest_config & (1 << idx))
119 continue;
120
115 if(idx == 0) 121 if(idx == 0)
116 WREG32_P(mmGRBM_GFX_INDEX, 0, 122 WREG32_P(mmGRBM_GFX_INDEX, 0,
117 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 123 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
190 return 0; 196 return 0;
191} 197}
192 198
199#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
200#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
201#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
202
203static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
204{
205 u32 tmp;
206 unsigned ret;
207
208 if (adev->flags & AMDGPU_IS_APU)
209 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
210 VCE_HARVEST_FUSE_MACRO__MASK) >>
211 VCE_HARVEST_FUSE_MACRO__SHIFT;
212 else
213 tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
214 CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
215 CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
216
217 switch (tmp) {
218 case 1:
219 ret = AMDGPU_VCE_HARVEST_VCE0;
220 break;
221 case 2:
222 ret = AMDGPU_VCE_HARVEST_VCE1;
223 break;
224 case 3:
225 ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
226 break;
227 default:
228 ret = 0;
229 }
230
231 return ret;
232}
233
193static int vce_v3_0_early_init(void *handle) 234static int vce_v3_0_early_init(void *handle)
194{ 235{
195 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
196 237
238 adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
239
240 if ((adev->vce.harvest_config &
241 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
242 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
243 return -ENOENT;
244
197 vce_v3_0_set_ring_funcs(adev); 245 vce_v3_0_set_ring_funcs(adev);
198 vce_v3_0_set_irq_funcs(adev); 246 vce_v3_0_set_irq_funcs(adev);
199 247
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fa5a4448531d..68552da40287 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
122 spin_unlock_irqrestore(&adev->smc_idx_lock, flags); 122 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
123} 123}
124 124
125/* smu_8_0_d.h */
126#define mmMP0PUB_IND_INDEX 0x180
127#define mmMP0PUB_IND_DATA 0x181
128
129static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
130{
131 unsigned long flags;
132 u32 r;
133
134 spin_lock_irqsave(&adev->smc_idx_lock, flags);
135 WREG32(mmMP0PUB_IND_INDEX, (reg));
136 r = RREG32(mmMP0PUB_IND_DATA);
137 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
138 return r;
139}
140
141static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&adev->smc_idx_lock, flags);
146 WREG32(mmMP0PUB_IND_INDEX, (reg));
147 WREG32(mmMP0PUB_IND_DATA, (v));
148 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
149}
150
125static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) 151static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
126{ 152{
127 unsigned long flags; 153 unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
1222 bool smc_enabled = false; 1248 bool smc_enabled = false;
1223 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1249 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1224 1250
1225 adev->smc_rreg = &vi_smc_rreg; 1251 if (adev->flags & AMDGPU_IS_APU) {
1226 adev->smc_wreg = &vi_smc_wreg; 1252 adev->smc_rreg = &cz_smc_rreg;
1253 adev->smc_wreg = &cz_smc_wreg;
1254 } else {
1255 adev->smc_rreg = &vi_smc_rreg;
1256 adev->smc_wreg = &vi_smc_wreg;
1257 }
1227 adev->pcie_rreg = &vi_pcie_rreg; 1258 adev->pcie_rreg = &vi_pcie_rreg;
1228 adev->pcie_wreg = &vi_pcie_wreg; 1259 adev->pcie_wreg = &vi_pcie_wreg;
1229 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; 1260 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
420 pqm_uninit(&p->pqm); 420 pqm_uninit(&p->pqm);
421 421
422 pdd = kfd_get_process_device_data(dev, p); 422 pdd = kfd_get_process_device_data(dev, p);
423
424 if (!pdd) {
425 mutex_unlock(&p->mutex);
426 return;
427 }
428
423 if (pdd->reset_wavefronts) { 429 if (pdd->reset_wavefronts) {
424 dbgdev_wave_reset_wavefronts(pdd->dev, p); 430 dbgdev_wave_reset_wavefronts(pdd->dev, p);
425 pdd->reset_wavefronts = false; 431 pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
431 * We don't call amd_iommu_unbind_pasid() here 437 * We don't call amd_iommu_unbind_pasid() here
432 * because the IOMMU called us. 438 * because the IOMMU called us.
433 */ 439 */
434 if (pdd) 440 pdd->bound = false;
435 pdd->bound = false;
436 441
437 mutex_unlock(&p->mutex); 442 mutex_unlock(&p->mutex);
438} 443}
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 42d2ffa08716..01ffe9bffe38 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
531 531
532 drm_crtc_vblank_off(crtc); 532 drm_crtc_vblank_off(crtc);
533 533
534 crtc->mode = *adj;
535
536 val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA; 534 val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
537 if (val != dcrtc->dumb_ctrl) { 535 if (val != dcrtc->dumb_ctrl) {
538 dcrtc->dumb_ctrl = val; 536 dcrtc->dumb_ctrl = val;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 580e10acaa3a..60a688ef81c7 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
69 69
70 if (dobj->obj.import_attach) { 70 if (dobj->obj.import_attach) {
71 /* We only ever display imported data */ 71 /* We only ever display imported data */
72 dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt, 72 if (dobj->sgt)
73 DMA_TO_DEVICE); 73 dma_buf_unmap_attachment(dobj->obj.import_attach,
74 dobj->sgt, DMA_TO_DEVICE);
74 drm_prime_gem_destroy(&dobj->obj, NULL); 75 drm_prime_gem_destroy(&dobj->obj, NULL);
75 } 76 }
76 77
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c5b06fdb459c..e939faba7fcc 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,6 +7,7 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9#include <drm/drmP.h> 9#include <drm/drmP.h>
10#include <drm/drm_plane_helper.h>
10#include "armada_crtc.h" 11#include "armada_crtc.h"
11#include "armada_drm.h" 12#include "armada_drm.h"
12#include "armada_fb.h" 13#include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
85 86
86 if (fb) 87 if (fb)
87 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb); 88 armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
88}
89 89
90static unsigned armada_limit(int start, unsigned size, unsigned max) 90 wake_up(&dplane->vbl.wait);
91{
92 int end = start + size;
93 if (end < 0)
94 return 0;
95 if (start < 0)
96 start = 0;
97 return (unsigned)end > max ? max - start : end - start;
98} 91}
99 92
100static int 93static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
105{ 98{
106 struct armada_plane *dplane = drm_to_armada_plane(plane); 99 struct armada_plane *dplane = drm_to_armada_plane(plane);
107 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 100 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
101 struct drm_rect src = {
102 .x1 = src_x,
103 .y1 = src_y,
104 .x2 = src_x + src_w,
105 .y2 = src_y + src_h,
106 };
107 struct drm_rect dest = {
108 .x1 = crtc_x,
109 .y1 = crtc_y,
110 .x2 = crtc_x + crtc_w,
111 .y2 = crtc_y + crtc_h,
112 };
113 const struct drm_rect clip = {
114 .x2 = crtc->mode.hdisplay,
115 .y2 = crtc->mode.vdisplay,
116 };
108 uint32_t val, ctrl0; 117 uint32_t val, ctrl0;
109 unsigned idx = 0; 118 unsigned idx = 0;
119 bool visible;
110 int ret; 120 int ret;
111 121
112 crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay); 122 ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
113 crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay); 123 0, INT_MAX, true, false, &visible);
124 if (ret)
125 return ret;
126
114 ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) | 127 ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
115 CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) | 128 CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
116 CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA; 129 CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
117 130
118 /* Does the position/size result in nothing to display? */ 131 /* Does the position/size result in nothing to display? */
119 if (crtc_w == 0 || crtc_h == 0) { 132 if (!visible)
120 ctrl0 &= ~CFG_DMA_ENA; 133 ctrl0 &= ~CFG_DMA_ENA;
121 }
122
123 /*
124 * FIXME: if the starting point is off screen, we need to
125 * adjust src_x, src_y, src_w, src_h appropriately, and
126 * according to the scale.
127 */
128 134
129 if (!dcrtc->plane) { 135 if (!dcrtc->plane) {
130 dcrtc->plane = plane; 136 dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
134 /* FIXME: overlay on an interlaced display */ 140 /* FIXME: overlay on an interlaced display */
135 /* Just updating the position/size? */ 141 /* Just updating the position/size? */
136 if (plane->fb == fb && dplane->ctrl0 == ctrl0) { 142 if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
137 val = (src_h & 0xffff0000) | src_w >> 16; 143 val = (drm_rect_height(&src) & 0xffff0000) |
144 drm_rect_width(&src) >> 16;
138 dplane->src_hw = val; 145 dplane->src_hw = val;
139 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN); 146 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
140 val = crtc_h << 16 | crtc_w; 147
148 val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
141 dplane->dst_hw = val; 149 dplane->dst_hw = val;
142 writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN); 150 writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
143 val = crtc_y << 16 | crtc_x; 151
152 val = dest.y1 << 16 | dest.x1;
144 dplane->dst_yx = val; 153 dplane->dst_yx = val;
145 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN); 154 writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
155
146 return 0; 156 return 0;
147 } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) { 157 } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
148 /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */ 158 /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
150 dcrtc->base + LCD_SPU_SRAM_PARA1); 160 dcrtc->base + LCD_SPU_SRAM_PARA1);
151 } 161 }
152 162
153 ret = wait_event_timeout(dplane->vbl.wait, 163 wait_event_timeout(dplane->vbl.wait,
154 list_empty(&dplane->vbl.update.node), 164 list_empty(&dplane->vbl.update.node),
155 HZ/25); 165 HZ/25);
156 if (ret < 0)
157 return ret;
158 166
159 if (plane->fb != fb) { 167 if (plane->fb != fb) {
160 struct armada_gem_object *obj = drm_fb_obj(fb); 168 struct armada_gem_object *obj = drm_fb_obj(fb);
161 uint32_t sy, su, sv; 169 uint32_t addr[3], pixel_format;
170 int i, num_planes, hsub;
162 171
163 /* 172 /*
164 * Take a reference on the new framebuffer - we want to 173 * Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
178 older_fb); 187 older_fb);
179 } 188 }
180 189
181 src_y >>= 16; 190 src_y = src.y1 >> 16;
182 src_x >>= 16; 191 src_x = src.x1 >> 16;
183 sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
184 src_x * fb->bits_per_pixel / 8;
185 su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
186 src_x;
187 sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
188 src_x;
189 192
190 armada_reg_queue_set(dplane->vbl.regs, idx, sy, 193 pixel_format = fb->pixel_format;
194 hsub = drm_format_horz_chroma_subsampling(pixel_format);
195 num_planes = drm_format_num_planes(pixel_format);
196
197 /*
198 * Annoyingly, shifting a YUYV-format image by one pixel
199 * causes the U/V planes to toggle. Toggle the UV swap.
200 * (Unfortunately, this causes momentary colour flickering.)
201 */
202 if (src_x & (hsub - 1) && num_planes == 1)
203 ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
204
205 for (i = 0; i < num_planes; i++)
206 addr[i] = obj->dev_addr + fb->offsets[i] +
207 src_y * fb->pitches[i] +
208 src_x * drm_format_plane_cpp(pixel_format, i);
209 for (; i < ARRAY_SIZE(addr); i++)
210 addr[i] = 0;
211
212 armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
191 LCD_SPU_DMA_START_ADDR_Y0); 213 LCD_SPU_DMA_START_ADDR_Y0);
192 armada_reg_queue_set(dplane->vbl.regs, idx, su, 214 armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
193 LCD_SPU_DMA_START_ADDR_U0); 215 LCD_SPU_DMA_START_ADDR_U0);
194 armada_reg_queue_set(dplane->vbl.regs, idx, sv, 216 armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
195 LCD_SPU_DMA_START_ADDR_V0); 217 LCD_SPU_DMA_START_ADDR_V0);
196 armada_reg_queue_set(dplane->vbl.regs, idx, sy, 218 armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
197 LCD_SPU_DMA_START_ADDR_Y1); 219 LCD_SPU_DMA_START_ADDR_Y1);
198 armada_reg_queue_set(dplane->vbl.regs, idx, su, 220 armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
199 LCD_SPU_DMA_START_ADDR_U1); 221 LCD_SPU_DMA_START_ADDR_U1);
200 armada_reg_queue_set(dplane->vbl.regs, idx, sv, 222 armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
201 LCD_SPU_DMA_START_ADDR_V1); 223 LCD_SPU_DMA_START_ADDR_V1);
202 224
203 val = fb->pitches[0] << 16 | fb->pitches[0]; 225 val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
208 LCD_SPU_DMA_PITCH_UV); 230 LCD_SPU_DMA_PITCH_UV);
209 } 231 }
210 232
211 val = (src_h & 0xffff0000) | src_w >> 16; 233 val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
212 if (dplane->src_hw != val) { 234 if (dplane->src_hw != val) {
213 dplane->src_hw = val; 235 dplane->src_hw = val;
214 armada_reg_queue_set(dplane->vbl.regs, idx, val, 236 armada_reg_queue_set(dplane->vbl.regs, idx, val,
215 LCD_SPU_DMA_HPXL_VLN); 237 LCD_SPU_DMA_HPXL_VLN);
216 } 238 }
217 val = crtc_h << 16 | crtc_w; 239
240 val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
218 if (dplane->dst_hw != val) { 241 if (dplane->dst_hw != val) {
219 dplane->dst_hw = val; 242 dplane->dst_hw = val;
220 armada_reg_queue_set(dplane->vbl.regs, idx, val, 243 armada_reg_queue_set(dplane->vbl.regs, idx, val,
221 LCD_SPU_DZM_HPXL_VLN); 244 LCD_SPU_DZM_HPXL_VLN);
222 } 245 }
223 val = crtc_y << 16 | crtc_x; 246
247 val = dest.y1 << 16 | dest.x1;
224 if (dplane->dst_yx != val) { 248 if (dplane->dst_yx != val) {
225 dplane->dst_yx = val; 249 dplane->dst_yx = val;
226 armada_reg_queue_set(dplane->vbl.regs, idx, val, 250 armada_reg_queue_set(dplane->vbl.regs, idx, val,
227 LCD_SPU_DMA_OVSA_HPXL_VLN); 251 LCD_SPU_DMA_OVSA_HPXL_VLN);
228 } 252 }
253
229 if (dplane->ctrl0 != ctrl0) { 254 if (dplane->ctrl0 != ctrl0) {
230 dplane->ctrl0 = ctrl0; 255 dplane->ctrl0 = ctrl0;
231 armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0, 256 armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
279 304
280static void armada_plane_destroy(struct drm_plane *plane) 305static void armada_plane_destroy(struct drm_plane *plane)
281{ 306{
282 kfree(plane); 307 struct armada_plane *dplane = drm_to_armada_plane(plane);
308
309 drm_plane_cleanup(plane);
310
311 kfree(dplane);
283} 312}
284 313
285static int armada_plane_set_property(struct drm_plane *plane, 314static int armada_plane_set_property(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index f69b92535505..5ae5c6923128 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; 355 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
356 356
357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); 357 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
358 drm_crtc_vblank_reset(&crtc->base);
358 359
359 dc->crtc = &crtc->base; 360 dc->crtc = &crtc->base;
360 361
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..ef6182bc8e5e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
313 313
314 pm_runtime_enable(dev->dev); 314 pm_runtime_enable(dev->dev);
315 315
316 ret = atmel_hlcdc_dc_modeset_init(dev); 316 ret = drm_vblank_init(dev, 1);
317 if (ret < 0) { 317 if (ret < 0) {
318 dev_err(dev->dev, "failed to initialize mode setting\n"); 318 dev_err(dev->dev, "failed to initialize vblank\n");
319 goto err_periph_clk_disable; 319 goto err_periph_clk_disable;
320 } 320 }
321 321
322 drm_mode_config_reset(dev); 322 ret = atmel_hlcdc_dc_modeset_init(dev);
323
324 ret = drm_vblank_init(dev, 1);
325 if (ret < 0) { 323 if (ret < 0) {
326 dev_err(dev->dev, "failed to initialize vblank\n"); 324 dev_err(dev->dev, "failed to initialize mode setting\n");
327 goto err_periph_clk_disable; 325 goto err_periph_clk_disable;
328 } 326 }
329 327
328 drm_mode_config_reset(dev);
329
330 pm_runtime_get_sync(dev->dev); 330 pm_runtime_get_sync(dev->dev);
331 ret = drm_irq_install(dev, dc->hlcdc->irq); 331 ret = drm_irq_install(dev, dc->hlcdc->irq);
332 pm_runtime_put_sync(dev->dev); 332 pm_runtime_put_sync(dev->dev);
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
559 return 0; 559 return 0;
560} 560}
561 561
562#ifdef CONFIG_PM 562#ifdef CONFIG_PM_SLEEP
563static int atmel_hlcdc_dc_drm_suspend(struct device *dev) 563static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
564{ 564{
565 struct drm_device *drm_dev = dev_get_drvdata(dev); 565 struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5b59d5ad7d1c..9dcc7280e572 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
196 } 196 }
197 197
198 funcs = connector->helper_private; 198 funcs = connector->helper_private;
199 new_encoder = funcs->best_encoder(connector); 199
200 if (funcs->atomic_best_encoder)
201 new_encoder = funcs->atomic_best_encoder(connector,
202 connector_state);
203 else
204 new_encoder = funcs->best_encoder(connector);
200 205
201 if (!new_encoder) { 206 if (!new_encoder) {
202 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", 207 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
229 } 234 }
230 } 235 }
231 236
237 if (WARN_ON(!connector_state->crtc))
238 return -EINVAL;
239
232 connector_state->best_encoder = new_encoder; 240 connector_state->best_encoder = new_encoder;
233 idx = drm_crtc_index(connector_state->crtc); 241 idx = drm_crtc_index(connector_state->crtc);
234 242
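
The change above makes the atomic helper prefer a connector's atomic_best_encoder() hook, which sees the new connector state, and only fall back to the legacy best_encoder() hook when the driver does not provide one. A minimal standalone C sketch of that dispatch pattern, using hypothetical stand-in types rather than the real DRM structures:

#include <stdio.h>

/* Hypothetical stand-ins for the DRM connector/encoder types. */
struct encoder { int id; };
struct conn_state { struct encoder *preferred; };
struct connector_funcs {
	/* state-aware hook; may be NULL on older drivers */
	struct encoder *(*atomic_best_encoder)(struct conn_state *state);
	/* legacy hook */
	struct encoder *(*best_encoder)(void);
};

static struct encoder legacy_enc = { .id = 1 };

static struct encoder *legacy_best(void) { return &legacy_enc; }
static struct encoder *atomic_best(struct conn_state *st) { return st->preferred; }

/* Prefer the state-aware callback when the driver provides one. */
static struct encoder *pick_encoder(const struct connector_funcs *funcs,
				    struct conn_state *state)
{
	if (funcs->atomic_best_encoder)
		return funcs->atomic_best_encoder(state);
	return funcs->best_encoder();
}

int main(void)
{
	struct encoder per_pipe_enc = { .id = 2 };
	struct conn_state st = { .preferred = &per_pipe_enc };
	struct connector_funcs legacy = { .best_encoder = legacy_best };
	struct connector_funcs atomic = { .atomic_best_encoder = atomic_best,
					  .best_encoder = legacy_best };

	printf("legacy driver -> encoder %d\n", pick_encoder(&legacy, &st)->id);
	printf("atomic driver -> encoder %d\n", pick_encoder(&atomic, &st)->id);
	return 0;
}
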
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index b9ba06176eb1..fed748311b92 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2706 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2706 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2707 return -EINVAL; 2707 return -EINVAL;
2708 2708
2709 /* For some reason crtc x/y offsets are signed internally. */ 2709 /*
2710 if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) 2710 * Universal plane src offsets are only 16.16, prevent havoc for
2711 * drivers using universal plane code internally.
2712 */
2713 if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
2711 return -ERANGE; 2714 return -ERANGE;
2712 2715
2713 drm_modeset_lock_all(dev); 2716 drm_modeset_lock_all(dev);
@@ -5395,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
5395 if (encoder->funcs->reset) 5398 if (encoder->funcs->reset)
5396 encoder->funcs->reset(encoder); 5399 encoder->funcs->reset(encoder);
5397 5400
5398 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 5401 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
5399 connector->status = connector_status_unknown;
5400
5401 if (connector->funcs->reset) 5402 if (connector->funcs->reset)
5402 connector->funcs->reset(connector); 5403 connector->funcs->reset(connector);
5403 }
5404} 5404}
5405EXPORT_SYMBOL(drm_mode_config_reset); 5405EXPORT_SYMBOL(drm_mode_config_reset);
5406 5406
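
drm_mode_setcrtc() now rejects x/y offsets with any of the upper 16 bits set, because drivers on the universal-plane path carry source coordinates as 16.16 fixed point and larger integer offsets would overflow that format. A small illustrative sketch of the check and the 16.16 conversion (not the driver code):

#include <stdint.h>
#include <stdio.h>

/* Universal-plane source coordinates are 16.16 fixed point, so the integer
 * part of a CRTC x/y offset must fit in 16 bits. */
static int crtc_offset_valid(uint32_t v)
{
	return (v & 0xffff0000) == 0;
}

/* Integer pixel offset -> 16.16 fixed point. */
static uint32_t to_16_16(uint32_t pixels)
{
	return pixels << 16;
}

int main(void)
{
	uint32_t ok = 4096, bad = 70000;

	printf("%u: valid=%d src=0x%08x\n", (unsigned)ok,
	       crtc_offset_valid(ok), (unsigned)to_16_16(ok));
	printf("%u: valid=%d (would overflow 16.16)\n", (unsigned)bad,
	       crtc_offset_valid(bad));
	return 0;
}
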
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..eb603f1defc2 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
873 from an EDID retrieval */ 873 from an EDID retrieval */
874 if (port->connector) { 874 if (port->connector) {
875 mutex_lock(&mgr->destroy_connector_lock); 875 mutex_lock(&mgr->destroy_connector_lock);
876 list_add(&port->connector->destroy_list, &mgr->destroy_connector_list); 876 list_add(&port->next, &mgr->destroy_connector_list);
877 mutex_unlock(&mgr->destroy_connector_lock); 877 mutex_unlock(&mgr->destroy_connector_lock);
878 schedule_work(&mgr->destroy_connector_work); 878 schedule_work(&mgr->destroy_connector_work);
879 return;
879 } 880 }
880 drm_dp_port_teardown_pdt(port, port->pdt); 881 drm_dp_port_teardown_pdt(port, port->pdt);
881 882
@@ -1294,7 +1295,6 @@ retry:
1294 goto retry; 1295 goto retry;
1295 } 1296 }
1296 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1297 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1297 WARN(1, "fail\n");
1298 1298
1299 return -EIO; 1299 return -EIO;
1300 } 1300 }
@@ -2660,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
2660static void drm_dp_destroy_connector_work(struct work_struct *work) 2660static void drm_dp_destroy_connector_work(struct work_struct *work)
2661{ 2661{
2662 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2662 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2663 struct drm_connector *connector; 2663 struct drm_dp_mst_port *port;
2664 2664
2665 /* 2665 /*
2666 * Not a regular list traverse as we have to drop the destroy 2666 * Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2669 */ 2669 */
2670 for (;;) { 2670 for (;;) {
2671 mutex_lock(&mgr->destroy_connector_lock); 2671 mutex_lock(&mgr->destroy_connector_lock);
2672 connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list); 2672 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2673 if (!connector) { 2673 if (!port) {
2674 mutex_unlock(&mgr->destroy_connector_lock); 2674 mutex_unlock(&mgr->destroy_connector_lock);
2675 break; 2675 break;
2676 } 2676 }
2677 list_del(&connector->destroy_list); 2677 list_del(&port->next);
2678 mutex_unlock(&mgr->destroy_connector_lock); 2678 mutex_unlock(&mgr->destroy_connector_lock);
2679 2679
2680 mgr->cbs->destroy_connector(mgr, connector); 2680 mgr->cbs->destroy_connector(mgr, port->connector);
2681
2682 drm_dp_port_teardown_pdt(port, port->pdt);
2683
2684 if (!port->input && port->vcpi.vcpi > 0)
2685 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2686 kfree(port);
2681 } 2687 }
2682} 2688}
2683 2689
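
The MST change queues the port itself on the destroy list and lets the worker pop one entry per lock acquisition, performing the connector teardown and the final kfree() with the lock dropped. A simplified userspace sketch of that deferred-destruction pattern, with hypothetical names standing in for the DRM/MST types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical deferred-destroy queue: entries are queued under a lock and a
 * worker pops one at a time, dropping the lock before the slow teardown. */
struct port {
	struct port *next;
	int id;
};

static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *destroy_list;

static void queue_destroy(struct port *p)
{
	pthread_mutex_lock(&destroy_lock);
	p->next = destroy_list;
	destroy_list = p;
	pthread_mutex_unlock(&destroy_lock);
}

static void destroy_worker(void)
{
	for (;;) {
		struct port *p;

		pthread_mutex_lock(&destroy_lock);
		p = destroy_list;
		if (!p) {
			pthread_mutex_unlock(&destroy_lock);
			break;
		}
		destroy_list = p->next;
		pthread_mutex_unlock(&destroy_lock);

		/* Teardown and the kfree() equivalent run without the lock held. */
		printf("destroying port %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct port *p = malloc(sizeof(*p));

		p->id = i;
		queue_destroy(p);
	}
	destroy_worker();
	return 0;
}
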
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index aa8bbb460c57..9cfcd0aef0df 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -70,6 +70,8 @@
70 70
71#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t) 71#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
72 72
73#define DRM_IOCTL_MODE_ADDFB232 DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
74
73typedef struct drm_version_32 { 75typedef struct drm_version_32 {
74 int version_major; /**< Major version */ 76 int version_major; /**< Major version */
75 int version_minor; /**< Minor version */ 77 int version_minor; /**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
1016 return 0; 1018 return 0;
1017} 1019}
1018 1020
1021typedef struct drm_mode_fb_cmd232 {
1022 u32 fb_id;
1023 u32 width;
1024 u32 height;
1025 u32 pixel_format;
1026 u32 flags;
1027 u32 handles[4];
1028 u32 pitches[4];
1029 u32 offsets[4];
1030 u64 modifier[4];
1031} __attribute__((packed)) drm_mode_fb_cmd232_t;
1032
1033static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
1034 unsigned long arg)
1035{
1036 struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
1037 struct drm_mode_fb_cmd232 req32;
1038 struct drm_mode_fb_cmd2 __user *req64;
1039 int i;
1040 int err;
1041
1042 if (copy_from_user(&req32, argp, sizeof(req32)))
1043 return -EFAULT;
1044
1045 req64 = compat_alloc_user_space(sizeof(*req64));
1046
1047 if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
1048 || __put_user(req32.width, &req64->width)
1049 || __put_user(req32.height, &req64->height)
1050 || __put_user(req32.pixel_format, &req64->pixel_format)
1051 || __put_user(req32.flags, &req64->flags))
1052 return -EFAULT;
1053
1054 for (i = 0; i < 4; i++) {
1055 if (__put_user(req32.handles[i], &req64->handles[i]))
1056 return -EFAULT;
1057 if (__put_user(req32.pitches[i], &req64->pitches[i]))
1058 return -EFAULT;
1059 if (__put_user(req32.offsets[i], &req64->offsets[i]))
1060 return -EFAULT;
1061 if (__put_user(req32.modifier[i], &req64->modifier[i]))
1062 return -EFAULT;
1063 }
1064
1065 err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
1066 if (err)
1067 return err;
1068
1069 if (__get_user(req32.fb_id, &req64->fb_id))
1070 return -EFAULT;
1071
1072 if (copy_to_user(argp, &req32, sizeof(req32)))
1073 return -EFAULT;
1074
1075 return 0;
1076}
1077
1019static drm_ioctl_compat_t *drm_compat_ioctls[] = { 1078static drm_ioctl_compat_t *drm_compat_ioctls[] = {
1020 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version, 1079 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
1021 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique, 1080 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
1048 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw, 1107 [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
1049#endif 1108#endif
1050 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank, 1109 [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
1110 [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
1051}; 1111};
1052 1112
1053/** 1113/**
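
The new compat handler is needed because 32-bit userspace lays out the ADDFB2 request with the u64 modifier[] array only 4-byte aligned, so the packed struct differs from the native 64-bit layout and has to be translated field by field before forwarding the ioctl. A standalone sketch showing how packing shifts the modifier offset and the total size (exact numbers depend on the ABI; this is for illustration only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Native 64-bit layout: the u64 modifier[] array is 8-byte aligned, so on a
 * typical LP64 ABI the compiler inserts padding in front of it. */
struct fb_cmd2_native {
	uint32_t fb_id, width, height, pixel_format, flags;
	uint32_t handles[4], pitches[4], offsets[4];
	uint64_t modifier[4];
};

/* 32-bit userspace layout: packed, so the u64s are only 4-byte aligned
 * (GCC/Clang attribute used here to reproduce that layout on a 64-bit build). */
struct fb_cmd2_compat {
	uint32_t fb_id, width, height, pixel_format, flags;
	uint32_t handles[4], pitches[4], offsets[4];
	uint64_t modifier[4];
} __attribute__((packed));

int main(void)
{
	printf("native: size=%zu modifier@%zu\n",
	       sizeof(struct fb_cmd2_native),
	       offsetof(struct fb_cmd2_native, modifier));
	printf("compat: size=%zu modifier@%zu\n",
	       sizeof(struct fb_cmd2_compat),
	       offsetof(struct fb_cmd2_compat, modifier));
	return 0;
}
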
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f9cc68fbd2a3..b50fa0afd907 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76 76
77static void store_vblank(struct drm_device *dev, int crtc, 77static void store_vblank(struct drm_device *dev, int crtc,
78 unsigned vblank_count_inc, 78 u32 vblank_count_inc,
79 struct timeval *t_vblank) 79 struct timeval *t_vblank)
80{ 80{
81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
1745 spin_lock_init(&ctx->lock); 1745 spin_lock_init(&ctx->lock);
1746 platform_set_drvdata(pdev, ctx); 1746 platform_set_drvdata(pdev, ctx);
1747 1747
1748 pm_runtime_set_active(dev);
1749 pm_runtime_enable(dev); 1748 pm_runtime_enable(dev);
1750 1749
1751 ret = exynos_drm_ippdrv_register(ippdrv); 1750 ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..f1c6b76c127f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
593 593
594 gsc_write(cfg, GSC_IN_CON); 594 gsc_write(cfg, GSC_IN_CON);
595 595
596 ctx->rotation = cfg & 596 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
597 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
598 *swap = ctx->rotation; 597 *swap = ctx->rotation;
599 598
600 return 0; 599 return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
857 856
858 gsc_write(cfg, GSC_IN_CON); 857 gsc_write(cfg, GSC_IN_CON);
859 858
860 ctx->rotation = cfg & 859 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
861 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
862 *swap = ctx->rotation; 860 *swap = ctx->rotation;
863 861
864 return 0; 862 return 0;
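
Both gsc hunks narrow the swap test to the 90-degree bit alone: the old mask of 90|270 also matched configurations that do not swap width and height. A sketch of the corrected test; the bit values below are hypothetical and merely assume that the 270-degree code includes the 90-degree bit, which is what made the old mask also match a plain 180-degree rotation:

#include <stdio.h>

/* Hypothetical rotation encoding, chosen only to illustrate the bug: assume
 * the 270-degree code reuses the 90-degree bit (270 = 90 + 180). */
#define ROT_90	0x1
#define ROT_180	0x2
#define ROT_270	(ROT_90 | ROT_180)

/* Fixed test: a width/height swap is needed exactly when the 90-degree
 * component is present (true for both 90 and 270). */
static int needs_swap(unsigned int cfg)
{
	return (cfg & ROT_90) ? 1 : 0;
}

/* Pre-fix test: the wider mask also matches a plain 180-degree rotation. */
static int needs_swap_buggy(unsigned int cfg)
{
	return (cfg & (ROT_90 | ROT_270)) ? 1 : 0;
}

int main(void)
{
	unsigned int cfg = ROT_180;	/* 180 degrees: no dimension swap */

	printf("buggy=%d fixed=%d\n", needs_swap_buggy(cfg), needs_swap(cfg));
	return 0;
}
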
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..4a00990e4ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
1064{ 1064{
1065 struct hdmi_context *hdata = ctx_from_connector(connector); 1065 struct hdmi_context *hdata = ctx_from_connector(connector);
1066 struct edid *edid; 1066 struct edid *edid;
1067 int ret;
1067 1068
1068 if (!hdata->ddc_adpt) 1069 if (!hdata->ddc_adpt)
1069 return -ENODEV; 1070 return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
1079 1080
1080 drm_mode_connector_update_edid_property(connector, edid); 1081 drm_mode_connector_update_edid_property(connector, edid);
1081 1082
1082 return drm_add_edid_modes(connector, edid); 1083 ret = drm_add_edid_modes(connector, edid);
1084
1085 kfree(edid);
1086
1087 return ret;
1083} 1088}
1084 1089
1085static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) 1090static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
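
The hdmi_get_modes() fix stores the result of drm_add_edid_modes() and frees the EDID blob before returning; the earlier direct return leaked the allocation made by the DDC read. A minimal sketch of the same ownership pattern with stand-in helpers (not the DRM API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in helpers, not the DRM API: get_edid() returns an allocation that
 * the caller owns, add_modes() only reads it and returns a mode count. */
static unsigned char *get_edid(void)
{
	return calloc(1, 128);
}

static int add_modes(const unsigned char *edid)
{
	return edid ? 4 : 0;
}

/* Pattern from the fix: keep the result, free the blob, then return.
 * Returning add_modes(edid) directly would leak the buffer. */
static int get_modes(void)
{
	unsigned char *edid = get_edid();
	int ret;

	if (!edid)
		return 0;

	ret = add_modes(edid);
	free(edid);
	return ret;
}

int main(void)
{
	printf("%d modes\n", get_modes());
	return 0;
}
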
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..4706b56902b4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
718 718
719 /* handling VSYNC */ 719 /* handling VSYNC */
720 if (val & MXR_INT_STATUS_VSYNC) { 720 if (val & MXR_INT_STATUS_VSYNC) {
721 /* vsync interrupt use different bit for read and clear */
722 val |= MXR_INT_CLEAR_VSYNC;
723 val &= ~MXR_INT_STATUS_VSYNC;
724
721 /* interlace scan need to check shadow register */ 725 /* interlace scan need to check shadow register */
722 if (ctx->interlace) { 726 if (ctx->interlace) {
723 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); 727 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
743 747
744out: 748out:
745 /* clear interrupts */ 749 /* clear interrupts */
746 if (~val & MXR_INT_EN_VSYNC) {
747 /* vsync interrupt use different bit for read and clear */
748 val &= ~MXR_INT_EN_VSYNC;
749 val |= MXR_INT_CLEAR_VSYNC;
750 }
751 mixer_reg_write(res, MXR_INT_STATUS, val); 750 mixer_reg_write(res, MXR_INT_STATUS, val);
752 751
753 spin_unlock(&res->reg_slock); 752 spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
907 } 906 }
908 907
909 /* enable vsync interrupt */ 908 /* enable vsync interrupt */
910 mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC, 909 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
911 MXR_INT_EN_VSYNC); 910 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
912 911
913 return 0; 912 return 0;
914} 913}
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
918 struct mixer_context *mixer_ctx = crtc->ctx; 917 struct mixer_context *mixer_ctx = crtc->ctx;
919 struct mixer_resources *res = &mixer_ctx->mixer_res; 918 struct mixer_resources *res = &mixer_ctx->mixer_res;
920 919
920 if (!mixer_ctx->powered) {
921 mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
922 return;
923 }
924
921 /* disable vsync interrupt */ 925 /* disable vsync interrupt */
926 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
922 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 927 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
923} 928}
924 929
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1047 1052
1048 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1053 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1049 1054
1055 if (ctx->int_en & MXR_INT_EN_VSYNC)
1056 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
1050 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 1057 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
1051 mixer_win_reset(ctx); 1058 mixer_win_reset(ctx);
1052} 1059}
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 2aaa3c88999e..00416f23b5cb 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -54,7 +54,7 @@ static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
54} 54}
55 55
56/* ADI recommended values for proper operation. */ 56/* ADI recommended values for proper operation. */
57static const struct reg_default adv7511_fixed_registers[] = { 57static const struct reg_sequence adv7511_fixed_registers[] = {
58 { 0x98, 0x03 }, 58 { 0x98, 0x03 },
59 { 0x9a, 0xe0 }, 59 { 0x9a, 0xe0 },
60 { 0x9c, 0x30 }, 60 { 0x9c, 0x30 },
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, 606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
607 uint8_t *buf, size_t size) 607 uint8_t *buf, size_t size)
608{ 608{
609 buf[PB(0)] = tda998x_cksum(buf, size);
610
611 reg_clear(priv, REG_DIP_IF_FLAGS, bit); 609 reg_clear(priv, REG_DIP_IF_FLAGS, bit);
612 reg_write_range(priv, addr, buf, size); 610 reg_write_range(priv, addr, buf, size);
613 reg_set(priv, REG_DIP_IF_FLAGS, bit); 611 reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
627 buf[PB(4)] = p->audio_frame[4]; 625 buf[PB(4)] = p->audio_frame[4];
628 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ 626 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
629 627
628 buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
629
630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, 630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
631 sizeof(buf)); 631 sizeof(buf));
632} 632}
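
Moving the checksum into tda998x_write_aif() ensures it is computed after the payload bytes are filled in; the old placement in the generic write helper checksummed a half-built buffer. HDMI-style infoframe checksums are chosen so that all frame bytes sum to zero modulo 256; the sketch below shows that convention with an illustrative helper, not the driver's tda998x_cksum():

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: pick the checksum byte so that every byte of the
 * frame sums to zero modulo 256. */
static uint8_t infoframe_cksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += buf[i];
	return (uint8_t)-sum;
}

int main(void)
{
	uint8_t frame[10] = { 0x84, 0x01, 0x0a };	/* header bytes */
	uint8_t total = 0;
	size_t i;

	frame[4] = 0x11;	/* fill the payload first ... */
	frame[5] = 0x22;
	frame[3] = infoframe_cksum(frame, sizeof(frame));	/* ... then checksum */

	for (i = 0; i < sizeof(frame); i++)
		total += frame[i];
	printf("checksum=0x%02x, total mod 256=%u\n", frame[3], total);
	return 0;
}
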
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 542fac628b28..fd1de451c8c6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -826,6 +826,7 @@ struct intel_context {
826 struct kref ref; 826 struct kref ref;
827 int user_handle; 827 int user_handle;
828 uint8_t remap_slice; 828 uint8_t remap_slice;
829 struct drm_i915_private *i915;
829 struct drm_i915_file_private *file_priv; 830 struct drm_i915_file_private *file_priv;
830 struct i915_ctx_hang_stats hang_stats; 831 struct i915_ctx_hang_stats hang_stats;
831 struct i915_hw_ppgtt *ppgtt; 832 struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
2036 unsigned int cache_level:3; 2037 unsigned int cache_level:3;
2037 unsigned int cache_dirty:1; 2038 unsigned int cache_dirty:1;
2038 2039
2039 unsigned int has_dma_mapping:1;
2040
2041 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2040 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
2042 2041
2043 unsigned int pin_display; 2042 unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
3116int i915_debugfs_connector_add(struct drm_connector *connector); 3115int i915_debugfs_connector_add(struct drm_connector *connector);
3117void intel_display_crc_init(struct drm_device *dev); 3116void intel_display_crc_init(struct drm_device *dev);
3118#else 3117#else
3119static inline int i915_debugfs_connector_add(struct drm_connector *connector) {} 3118static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3119{ return 0; }
3120static inline void intel_display_crc_init(struct drm_device *dev) {} 3120static inline void intel_display_crc_init(struct drm_device *dev) {}
3121#endif 3121#endif
3122 3122
@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3303#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3303#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3304 3304
3305#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3305#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
3306 u32 upper = I915_READ(upper_reg); \ 3306 u32 upper, lower, tmp; \
3307 u32 lower = I915_READ(lower_reg); \ 3307 tmp = I915_READ(upper_reg); \
3308 u32 tmp = I915_READ(upper_reg); \ 3308 do { \
3309 if (upper != tmp) { \ 3309 upper = tmp; \
3310 upper = tmp; \ 3310 lower = I915_READ(lower_reg); \
3311 lower = I915_READ(lower_reg); \ 3311 tmp = I915_READ(upper_reg); \
3312 WARN_ON(I915_READ(upper_reg) != upper); \ 3312 } while (upper != tmp); \
3313 } \ 3313 (u64)upper << 32 | lower; })
3314 (u64)upper << 32 | lower; })
3315 3314
3316#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3315#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
3317#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3316#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
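
The reworked I915_READ64_2x32() keeps re-reading the upper register until it comes back unchanged around the lower read, which is the usual way to assemble a consistent 64-bit value from two 32-bit reads when the counter may wrap between them. A standalone sketch of the same retry loop against a simulated register pair:

#include <stdint.h>
#include <stdio.h>

/* Two 32-bit "registers" backing one 64-bit counter.  read_lo() simulates a
 * single wrap happening between the two MMIO reads. */
static uint32_t hw_hi = 0, hw_lo = 0xffffffff;
static int wrapped;

static uint32_t read_hi(void)
{
	return hw_hi;
}

static uint32_t read_lo(void)
{
	uint32_t v = hw_lo;

	if (!wrapped) {		/* the low half wraps and carries into the high half */
		hw_lo = 0;
		hw_hi++;
		wrapped = 1;
	}
	return v;
}

/* Same retry loop as the reworked macro: re-read the upper half until it is
 * stable around the lower read, then combine the two halves. */
static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, tmp;

	tmp = read_hi();
	do {
		upper = tmp;
		lower = read_lo();
		tmp = read_hi();
	} while (upper != tmp);

	return (uint64_t)upper << 32 | lower;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)read64_2x32());
	return 0;
}
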
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 248fd1ac7b3a..52b446b27b4d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
213 sg_dma_len(sg) = obj->base.size; 213 sg_dma_len(sg) = obj->base.size;
214 214
215 obj->pages = st; 215 obj->pages = st;
216 obj->has_dma_mapping = true;
217 return 0; 216 return 0;
218} 217}
219 218
@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
265 264
266 sg_free_table(obj->pages); 265 sg_free_table(obj->pages);
267 kfree(obj->pages); 266 kfree(obj->pages);
268
269 obj->has_dma_mapping = false;
270} 267}
271 268
272static void 269static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2139 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; 2136 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2140 } 2137 }
2141 2138
2139 i915_gem_gtt_finish_object(obj);
2140
2142 if (i915_gem_object_needs_bit17_swizzle(obj)) 2141 if (i915_gem_object_needs_bit17_swizzle(obj))
2143 i915_gem_object_save_bit_17_swizzle(obj); 2142 i915_gem_object_save_bit_17_swizzle(obj);
2144 2143
@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2199 struct sg_page_iter sg_iter; 2198 struct sg_page_iter sg_iter;
2200 struct page *page; 2199 struct page *page;
2201 unsigned long last_pfn = 0; /* suppress gcc warning */ 2200 unsigned long last_pfn = 0; /* suppress gcc warning */
2201 int ret;
2202 gfp_t gfp; 2202 gfp_t gfp;
2203 2203
2204 /* Assert that the object is not currently in any GPU domain. As it 2204 /* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2246 */ 2246 */
2247 i915_gem_shrink_all(dev_priv); 2247 i915_gem_shrink_all(dev_priv);
2248 page = shmem_read_mapping_page(mapping, i); 2248 page = shmem_read_mapping_page(mapping, i);
2249 if (IS_ERR(page)) 2249 if (IS_ERR(page)) {
2250 ret = PTR_ERR(page);
2250 goto err_pages; 2251 goto err_pages;
2252 }
2251 } 2253 }
2252#ifdef CONFIG_SWIOTLB 2254#ifdef CONFIG_SWIOTLB
2253 if (swiotlb_nr_tbl()) { 2255 if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2276 sg_mark_end(sg); 2278 sg_mark_end(sg);
2277 obj->pages = st; 2279 obj->pages = st;
2278 2280
2281 ret = i915_gem_gtt_prepare_object(obj);
2282 if (ret)
2283 goto err_pages;
2284
2279 if (i915_gem_object_needs_bit17_swizzle(obj)) 2285 if (i915_gem_object_needs_bit17_swizzle(obj))
2280 i915_gem_object_do_bit_17_swizzle(obj); 2286 i915_gem_object_do_bit_17_swizzle(obj);
2281 2287
@@ -2300,10 +2306,10 @@ err_pages:
2300 * space and so want to translate the error from shmemfs back to our 2306 * space and so want to translate the error from shmemfs back to our
2301 * usual understanding of ENOMEM. 2307 * usual understanding of ENOMEM.
2302 */ 2308 */
2303 if (PTR_ERR(page) == -ENOSPC) 2309 if (ret == -ENOSPC)
2304 return -ENOMEM; 2310 ret = -ENOMEM;
2305 else 2311
2306 return PTR_ERR(page); 2312 return ret;
2307} 2313}
2308 2314
2309/* Ensure that the associated pages are gathered from the backing storage 2315/* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
2542 } 2548 }
2543 2549
2544 request->emitted_jiffies = jiffies; 2550 request->emitted_jiffies = jiffies;
2551 ring->last_submitted_seqno = request->seqno;
2545 list_add_tail(&request->list, &ring->request_list); 2552 list_add_tail(&request->list, &ring->request_list);
2546 request->file_priv = NULL; 2553 request->file_priv = NULL;
2547 2554
@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)
3247 3254
3248 /* Since the unbound list is global, only move to that list if 3255 /* Since the unbound list is global, only move to that list if
3249 * no more VMAs exist. */ 3256 * no more VMAs exist. */
3250 if (list_empty(&obj->vma_list)) { 3257 if (list_empty(&obj->vma_list))
3251 i915_gem_gtt_finish_object(obj);
3252 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); 3258 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3253 }
3254 3259
3255 /* And finally now the object is completely decoupled from this vma, 3260 /* And finally now the object is completely decoupled from this vma,
3256 * we can drop its hold on the backing storage and allow it to be 3261 * we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ search_free:
3768 goto err_remove_node; 3773 goto err_remove_node;
3769 } 3774 }
3770 3775
3771 ret = i915_gem_gtt_prepare_object(obj);
3772 if (ret)
3773 goto err_remove_node;
3774
3775 trace_i915_vma_bind(vma, flags); 3776 trace_i915_vma_bind(vma, flags);
3776 ret = i915_vma_bind(vma, obj->cache_level, flags); 3777 ret = i915_vma_bind(vma, obj->cache_level, flags);
3777 if (ret) 3778 if (ret)
3778 goto err_finish_gtt; 3779 goto err_remove_node;
3779 3780
3780 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); 3781 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3781 list_add_tail(&vma->mm_list, &vm->inactive_list); 3782 list_add_tail(&vma->mm_list, &vm->inactive_list);
3782 3783
3783 return vma; 3784 return vma;
3784 3785
3785err_finish_gtt:
3786 i915_gem_gtt_finish_object(obj);
3787err_remove_node: 3786err_remove_node:
3788 drm_mm_remove_node(&vma->node); 3787 drm_mm_remove_node(&vma->node);
3789err_free_vma: 3788err_free_vma:
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..48afa777e94a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
135 135
136void i915_gem_context_free(struct kref *ctx_ref) 136void i915_gem_context_free(struct kref *ctx_ref)
137{ 137{
138 struct intel_context *ctx = container_of(ctx_ref, 138 struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
139 typeof(*ctx), ref);
140 139
141 trace_i915_context_free(ctx); 140 trace_i915_context_free(ctx);
142 141
@@ -157,9 +156,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
157 struct drm_i915_gem_object *obj; 156 struct drm_i915_gem_object *obj;
158 int ret; 157 int ret;
159 158
160 obj = i915_gem_object_create_stolen(dev, size); 159 obj = i915_gem_alloc_object(dev, size);
161 if (obj == NULL)
162 obj = i915_gem_alloc_object(dev, size);
163 if (obj == NULL) 160 if (obj == NULL)
164 return ERR_PTR(-ENOMEM); 161 return ERR_PTR(-ENOMEM);
165 162
@@ -197,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
197 194
198 kref_init(&ctx->ref); 195 kref_init(&ctx->ref);
199 list_add_tail(&ctx->link, &dev_priv->context_list); 196 list_add_tail(&ctx->link, &dev_priv->context_list);
197 ctx->i915 = dev_priv;
200 198
201 if (dev_priv->hw_context_size) { 199 if (dev_priv->hw_context_size) {
202 struct drm_i915_gem_object *obj = 200 struct drm_i915_gem_object *obj =
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7998da27c500..e9c2bfd85b52 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
256 return PTR_ERR(sg); 256 return PTR_ERR(sg);
257 257
258 obj->pages = sg; 258 obj->pages = sg;
259 obj->has_dma_mapping = true;
260 return 0; 259 return 0;
261} 260}
262 261
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
264{ 263{
265 dma_buf_unmap_attachment(obj->base.import_attach, 264 dma_buf_unmap_attachment(obj->base.import_attach,
266 obj->pages, DMA_BIDIRECTIONAL); 265 obj->pages, DMA_BIDIRECTIONAL);
267 obj->has_dma_mapping = false;
268} 266}
269 267
270static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { 268static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9daa2883ac18..31e8269e6e3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1723 1723
1724int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 1724int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
1725{ 1725{
1726 if (obj->has_dma_mapping)
1727 return 0;
1728
1729 if (!dma_map_sg(&obj->base.dev->pdev->dev, 1726 if (!dma_map_sg(&obj->base.dev->pdev->dev,
1730 obj->pages->sgl, obj->pages->nents, 1727 obj->pages->sgl, obj->pages->nents,
1731 PCI_DMA_BIDIRECTIONAL)) 1728 PCI_DMA_BIDIRECTIONAL))
@@ -1926,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
1926 vma->vm->insert_entries(vma->vm, pages, 1923 vma->vm->insert_entries(vma->vm, pages,
1927 vma->node.start, 1924 vma->node.start,
1928 cache_level, pte_flags); 1925 cache_level, pte_flags);
1926
1927 /* Note the inconsistency here is due to absence of the
1928 * aliasing ppgtt on gen4 and earlier. Though we always
1929 * request PIN_USER for execbuffer (translated to LOCAL_BIND),
1930 * without the appgtt, we cannot honour that request and so
1931 * must substitute it with a global binding. Since we do this
1932 * behind the upper layers back, we need to explicitly set
1933 * the bound flag ourselves.
1934 */
1935 vma->bound |= GLOBAL_BIND;
1936
1929 } 1937 }
1930 1938
1931 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { 1939 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -1972,10 +1980,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
1972 1980
1973 interruptible = do_idling(dev_priv); 1981 interruptible = do_idling(dev_priv);
1974 1982
1975 if (!obj->has_dma_mapping) 1983 dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
1976 dma_unmap_sg(&dev->pdev->dev, 1984 PCI_DMA_BIDIRECTIONAL);
1977 obj->pages->sgl, obj->pages->nents,
1978 PCI_DMA_BIDIRECTIONAL);
1979 1985
1980 undo_idling(dev_priv, interruptible); 1986 undo_idling(dev_priv, interruptible);
1981} 1987}
@@ -2546,6 +2552,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
2546 struct drm_i915_private *dev_priv = dev->dev_private; 2552 struct drm_i915_private *dev_priv = dev->dev_private;
2547 struct drm_i915_gem_object *obj; 2553 struct drm_i915_gem_object *obj;
2548 struct i915_address_space *vm; 2554 struct i915_address_space *vm;
2555 struct i915_vma *vma;
2556 bool flush;
2549 2557
2550 i915_check_and_clear_faults(dev); 2558 i915_check_and_clear_faults(dev);
2551 2559
@@ -2555,16 +2563,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
2555 dev_priv->gtt.base.total, 2563 dev_priv->gtt.base.total,
2556 true); 2564 true);
2557 2565
2566 /* Cache flush objects bound into GGTT and rebind them. */
2567 vm = &dev_priv->gtt.base;
2558 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 2568 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
2559 struct i915_vma *vma = i915_gem_obj_to_vma(obj, 2569 flush = false;
2560 &dev_priv->gtt.base); 2570 list_for_each_entry(vma, &obj->vma_list, vma_link) {
2561 if (!vma) 2571 if (vma->vm != vm)
2562 continue; 2572 continue;
2563 2573
2564 i915_gem_clflush_object(obj, obj->pin_display); 2574 WARN_ON(i915_vma_bind(vma, obj->cache_level,
2565 WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE)); 2575 PIN_UPDATE));
2566 }
2567 2576
2577 flush = true;
2578 }
2579
2580 if (flush)
2581 i915_gem_clflush_object(obj, obj->pin_display);
2582 }
2568 2583
2569 if (INTEL_INFO(dev)->gen >= 8) { 2584 if (INTEL_INFO(dev)->gen >= 8) {
2570 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 2585 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 348ed5abcdbf..8b5b784c62fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
416 if (obj->pages == NULL) 416 if (obj->pages == NULL)
417 goto cleanup; 417 goto cleanup;
418 418
419 obj->has_dma_mapping = true;
420 i915_gem_object_pin_pages(obj); 419 i915_gem_object_pin_pages(obj);
421 obj->stolen = stolen; 420 obj->stolen = stolen;
422 421
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d19c9db5e18c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
464 } 464 }
465 465
466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
467 args->phys_swizzle_mode = args->swizzle_mode; 467 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
468 args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
469 else
470 args->phys_swizzle_mode = args->swizzle_mode;
468 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 471 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
469 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 472 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
470 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 473 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1f4e5a32a16e..8fd431bcdfd3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -545,6 +545,26 @@ err:
545 return ret; 545 return ret;
546} 546}
547 547
548static int
549__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
550 struct page **pvec, int num_pages)
551{
552 int ret;
553
554 ret = st_set_pages(&obj->pages, pvec, num_pages);
555 if (ret)
556 return ret;
557
558 ret = i915_gem_gtt_prepare_object(obj);
559 if (ret) {
560 sg_free_table(obj->pages);
561 kfree(obj->pages);
562 obj->pages = NULL;
563 }
564
565 return ret;
566}
567
548static void 568static void
549__i915_gem_userptr_get_pages_worker(struct work_struct *_work) 569__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
550{ 570{
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
584 if (obj->userptr.work != &work->work) { 604 if (obj->userptr.work != &work->work) {
585 ret = 0; 605 ret = 0;
586 } else if (pinned == num_pages) { 606 } else if (pinned == num_pages) {
587 ret = st_set_pages(&obj->pages, pvec, num_pages); 607 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
588 if (ret == 0) { 608 if (ret == 0) {
589 list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list); 609 list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
610 obj->get_page.sg = obj->pages->sgl;
611 obj->get_page.last = 0;
612
590 pinned = 0; 613 pinned = 0;
591 } 614 }
592 } 615 }
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
693 } 716 }
694 } 717 }
695 } else { 718 } else {
696 ret = st_set_pages(&obj->pages, pvec, num_pages); 719 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
697 if (ret == 0) { 720 if (ret == 0) {
698 obj->userptr.work = NULL; 721 obj->userptr.work = NULL;
699 pinned = 0; 722 pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
715 if (obj->madv != I915_MADV_WILLNEED) 738 if (obj->madv != I915_MADV_WILLNEED)
716 obj->dirty = 0; 739 obj->dirty = 0;
717 740
741 i915_gem_gtt_finish_object(obj);
742
718 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 743 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
719 struct page *page = sg_page_iter_page(&sg_iter); 744 struct page *page = sg_page_iter_page(&sg_iter);
720 745
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 176de6322e4d..23aa04cded6b 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
204 drm_ioctl_compat_t *fn = NULL; 204 drm_ioctl_compat_t *fn = NULL;
205 int ret; 205 int ret;
206 206
207 if (nr < DRM_COMMAND_BASE) 207 if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
208 return drm_compat_ioctl(filp, cmd, arg); 208 return drm_compat_ioctl(filp, cmd, arg);
209 209
210 if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) 210 if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e6bb72dca3ff..984e2fe6688c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2706 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2706 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2707} 2707}
2708 2708
2709static struct drm_i915_gem_request *
2710ring_last_request(struct intel_engine_cs *ring)
2711{
2712 return list_entry(ring->request_list.prev,
2713 struct drm_i915_gem_request, list);
2714}
2715
2716static bool 2709static bool
2717ring_idle(struct intel_engine_cs *ring) 2710ring_idle(struct intel_engine_cs *ring, u32 seqno)
2718{ 2711{
2719 return (list_empty(&ring->request_list) || 2712 return (list_empty(&ring->request_list) ||
2720 i915_gem_request_completed(ring_last_request(ring), false)); 2713 i915_seqno_passed(seqno, ring->last_submitted_seqno));
2721} 2714}
2722 2715
2723static bool 2716static bool
@@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
2939 acthd = intel_ring_get_active_head(ring); 2932 acthd = intel_ring_get_active_head(ring);
2940 2933
2941 if (ring->hangcheck.seqno == seqno) { 2934 if (ring->hangcheck.seqno == seqno) {
2942 if (ring_idle(ring)) { 2935 if (ring_idle(ring, seqno)) {
2943 ring->hangcheck.action = HANGCHECK_IDLE; 2936 ring->hangcheck.action = HANGCHECK_IDLE;
2944 2937
2945 if (waitqueue_active(&ring->irq_queue)) { 2938 if (waitqueue_active(&ring->irq_queue)) {
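
ring_idle() now compares the hardware seqno against last_submitted_seqno instead of dereferencing the request list, so the hang checker no longer needs the lock that protects that list. Seqno comparisons of the i915_seqno_passed() kind are wrap-safe because they go through a signed 32-bit subtraction; a small sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "has seq1 reached seq2?" test: the signed 32-bit difference is
 * non-negative whenever the two values are within 2^31 of each other. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Lock-free idle check in the spirit of the reworked ring_idle(): the ring is
 * idle once the hardware seqno has caught up with the last submission. */
static bool ring_is_idle(uint32_t hw_seqno, uint32_t last_submitted_seqno)
{
	return seqno_passed(hw_seqno, last_submitted_seqno);
}

int main(void)
{
	printf("%d\n", ring_is_idle(5, 0xfffffffeu));	/* 1: passed the wrap */
	printf("%d\n", ring_is_idle(0xfffffffeu, 5));	/* 0: still behind */
	return 0;
}
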
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 497cba5deb1e..849a2590e010 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
727 TP_fast_assign( 727 TP_fast_assign(
728 __entry->ctx = ctx; 728 __entry->ctx = ctx;
729 __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; 729 __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
730 __entry->dev = ctx->file_priv->dev_priv->dev->primary->index; 730 __entry->dev = ctx->i915->dev->primary->index;
731 ), 731 ),
732 732
733 TP_printk("dev=%u, ctx=%p, ctx_vm=%p", 733 TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033aae60..8e35e0d013df 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
129 struct drm_atomic_state *state, 129 struct drm_atomic_state *state,
130 bool async) 130 bool async)
131{ 131{
132 int ret; 132 struct drm_crtc_state *crtc_state;
133 int i; 133 struct drm_crtc *crtc;
134 int ret, i;
134 135
135 if (async) { 136 if (async) {
136 DRM_DEBUG_KMS("i915 does not yet support async commit\n"); 137 DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
142 return ret; 143 return ret;
143 144
144 /* Point of no return */ 145 /* Point of no return */
145 146 drm_atomic_helper_swap_state(dev, state);
146 /*
147 * FIXME: The proper sequence here will eventually be:
148 *
149 * drm_atomic_helper_swap_state(dev, state)
150 * drm_atomic_helper_commit_modeset_disables(dev, state);
151 * drm_atomic_helper_commit_planes(dev, state);
152 * drm_atomic_helper_commit_modeset_enables(dev, state);
153 * drm_atomic_helper_wait_for_vblanks(dev, state);
154 * drm_atomic_helper_cleanup_planes(dev, state);
155 * drm_atomic_state_free(state);
156 *
157 * once we have full atomic modeset. For now, just manually update
158 * plane states to avoid clobbering good states with dummy states
159 * while nuclear pageflipping.
160 */
161 for (i = 0; i < dev->mode_config.num_total_plane; i++) {
162 struct drm_plane *plane = state->planes[i];
163
164 if (!plane)
165 continue;
166
167 plane->state->state = state;
168 swap(state->plane_states[i], plane->state);
169 plane->state->state = NULL;
170 }
171 147
172 /* swap crtc_scaler_state */ 148 /* swap crtc_scaler_state */
173 for (i = 0; i < dev->mode_config.num_crtc; i++) { 149 for_each_crtc_in_state(state, crtc, crtc_state, i) {
174 struct drm_crtc *crtc = state->crtcs[i]; 150 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
175 if (!crtc) {
176 continue;
177 }
178
179 to_intel_crtc(crtc)->config->scaler_state =
180 to_intel_crtc_state(state->crtc_states[i])->scaler_state;
181 151
182 if (INTEL_INFO(dev)->gen >= 9) 152 if (INTEL_INFO(dev)->gen >= 9)
183 skl_detach_scalers(to_intel_crtc(crtc)); 153 skl_detach_scalers(to_intel_crtc(crtc));
154
155 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
184 } 156 }
185 157
186 drm_atomic_helper_commit_planes(dev, state);
187 drm_atomic_helper_wait_for_vblanks(dev, state); 158 drm_atomic_helper_wait_for_vblanks(dev, state);
188 drm_atomic_helper_cleanup_planes(dev, state); 159 drm_atomic_helper_cleanup_planes(dev, state);
189 drm_atomic_state_free(state); 160 drm_atomic_state_free(state);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b61f9810387..87476ff181dd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4854,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4854 struct intel_plane *intel_plane; 4854 struct intel_plane *intel_plane;
4855 int pipe = intel_crtc->pipe; 4855 int pipe = intel_crtc->pipe;
4856 4856
4857 if (!intel_crtc->active)
4858 return;
4859
4857 intel_crtc_wait_for_pending_flips(crtc); 4860 intel_crtc_wait_for_pending_flips(crtc);
4858 4861
4859 intel_pre_disable_primary(crtc); 4862 intel_pre_disable_primary(crtc);
@@ -6312,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
6312 struct drm_connector *connector; 6315 struct drm_connector *connector;
6313 struct drm_i915_private *dev_priv = dev->dev_private; 6316 struct drm_i915_private *dev_priv = dev->dev_private;
6314 6317
6315 /* crtc should still be enabled when we disable it. */
6316 WARN_ON(!crtc->state->enable);
6317
6318 intel_crtc_disable_planes(crtc); 6318 intel_crtc_disable_planes(crtc);
6319 dev_priv->display.crtc_disable(crtc); 6319 dev_priv->display.crtc_disable(crtc);
6320 dev_priv->display.off(crtc); 6320 dev_priv->display.off(crtc);
@@ -7887,7 +7887,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
7887 int pipe = pipe_config->cpu_transcoder; 7887 int pipe = pipe_config->cpu_transcoder;
7888 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7888 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7889 intel_clock_t clock; 7889 intel_clock_t clock;
7890 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2; 7890 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
7891 int refclk = 100000; 7891 int refclk = 100000;
7892 7892
7893 mutex_lock(&dev_priv->sb_lock); 7893 mutex_lock(&dev_priv->sb_lock);
@@ -7895,10 +7895,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
7895 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 7895 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
7896 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 7896 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
7897 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 7897 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
7898 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7898 mutex_unlock(&dev_priv->sb_lock); 7899 mutex_unlock(&dev_priv->sb_lock);
7899 7900
7900 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 7901 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
7901 clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff); 7902 clock.m2 = (pll_dw0 & 0xff) << 22;
7903 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
7904 clock.m2 |= pll_dw2 & 0x3fffff;
7902 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 7905 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
7903 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 7906 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
7904 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 7907 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
@@ -11823,7 +11826,9 @@ encoder_retry:
11823 goto encoder_retry; 11826 goto encoder_retry;
11824 } 11827 }
11825 11828
11826 pipe_config->dither = pipe_config->pipe_bpp != base_bpp; 11829 /* Dithering does not seem to pass bits through correctly when it should, so
11830 * only enable it on 6bpc panels. */
11831 pipe_config->dither = pipe_config->pipe_bpp == 6*3;
11827 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n", 11832 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
11828 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 11833 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
11829 11834
@@ -12585,7 +12590,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
12585 continue; 12590 continue;
12586 12591
12587 if (!crtc_state->enable) { 12592 if (!crtc_state->enable) {
12588 intel_crtc_disable(crtc); 12593 if (crtc->state->enable)
12594 intel_crtc_disable(crtc);
12589 } else if (crtc->state->enable) { 12595 } else if (crtc->state->enable) {
12590 intel_crtc_disable_planes(crtc); 12596 intel_crtc_disable_planes(crtc);
12591 dev_priv->display.crtc_disable(crtc); 12597 dev_priv->display.crtc_disable(crtc);
@@ -12620,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
12620 12626
12621 modeset_update_crtc_power_domains(state); 12627 modeset_update_crtc_power_domains(state);
12622 12628
12623 drm_atomic_helper_commit_planes(dev, state);
12624
12625 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 12629 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
12626 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12630 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12627 if (!needs_modeset(crtc->state) || !crtc->state->enable) 12631 if (!needs_modeset(crtc->state) || !crtc->state->enable) {
12632 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
12628 continue; 12633 continue;
12634 }
12629 12635
12630 update_scanline_offset(to_intel_crtc(crtc)); 12636 update_scanline_offset(to_intel_crtc(crtc));
12631 12637
12632 dev_priv->display.crtc_enable(crtc); 12638 dev_priv->display.crtc_enable(crtc);
12633 intel_crtc_enable_planes(crtc); 12639 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
12634 } 12640 }
12635 12641
12636 /* FIXME: add subpixel order */ 12642 /* FIXME: add subpixel order */
@@ -12887,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
12887 return 0; 12893 return 0;
12888} 12894}
12889 12895
12890static bool primary_plane_visible(struct drm_crtc *crtc)
12891{
12892 struct intel_plane_state *plane_state =
12893 to_intel_plane_state(crtc->primary->state);
12894
12895 return plane_state->visible;
12896}
12897
12898static int intel_crtc_set_config(struct drm_mode_set *set) 12896static int intel_crtc_set_config(struct drm_mode_set *set)
12899{ 12897{
12900 struct drm_device *dev; 12898 struct drm_device *dev;
12901 struct drm_atomic_state *state = NULL; 12899 struct drm_atomic_state *state = NULL;
12902 struct intel_crtc_state *pipe_config; 12900 struct intel_crtc_state *pipe_config;
12903 bool primary_plane_was_visible;
12904 int ret; 12901 int ret;
12905 12902
12906 BUG_ON(!set); 12903 BUG_ON(!set);
@@ -12939,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
12939 12936
12940 intel_update_pipe_size(to_intel_crtc(set->crtc)); 12937 intel_update_pipe_size(to_intel_crtc(set->crtc));
12941 12938
12942 primary_plane_was_visible = primary_plane_visible(set->crtc);
12943
12944 ret = intel_set_mode_with_config(set->crtc, pipe_config, true); 12939 ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
12945 12940
12946 if (ret == 0 &&
12947 pipe_config->base.enable &&
12948 pipe_config->base.planes_changed &&
12949 !needs_modeset(&pipe_config->base)) {
12950 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
12951
12952 /*
12953 * We need to make sure the primary plane is re-enabled if it
12954 * has previously been turned off.
12955 */
12956 if (ret == 0 && !primary_plane_was_visible &&
12957 primary_plane_visible(set->crtc)) {
12958 WARN_ON(!intel_crtc->active);
12959 intel_post_enable_primary(set->crtc);
12960 }
12961
12962 /*
12963 * In the fastboot case this may be our only check of the
12964 * state after boot. It would be better to only do it on
12965 * the first update, but we don't have a nice way of doing that
12966 * (and really, set_config isn't used much for high freq page
12967 * flipping, so increasing its cost here shouldn't be a big
12968 * deal).
12969 */
12970 if (i915.fastboot && ret == 0)
12971 intel_modeset_check_state(set->crtc->dev);
12972 }
12973
12974 if (ret) { 12941 if (ret) {
12975 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n", 12942 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
12976 set->crtc->base.id, ret); 12943 set->crtc->base.id, ret);
@@ -13270,7 +13237,7 @@ intel_check_primary_plane(struct drm_plane *plane,
13270 if (ret) 13237 if (ret)
13271 return ret; 13238 return ret;
13272 13239
13273 if (intel_crtc->active) { 13240 if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
13274 struct intel_plane_state *old_state = 13241 struct intel_plane_state *old_state =
13275 to_intel_plane_state(plane->state); 13242 to_intel_plane_state(plane->state);
13276 13243
@@ -13301,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
13301 */ 13268 */
13302 if (IS_BROADWELL(dev)) 13269 if (IS_BROADWELL(dev))
13303 intel_crtc->atomic.wait_vblank = true; 13270 intel_crtc->atomic.wait_vblank = true;
13271
13272 if (crtc_state)
13273 intel_crtc->atomic.post_enable_primary = true;
13304 } 13274 }
13305 13275
13306 /* 13276 /*
@@ -13313,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
13313 if (!state->visible || !fb) 13283 if (!state->visible || !fb)
13314 intel_crtc->atomic.disable_ips = true; 13284 intel_crtc->atomic.disable_ips = true;
13315 13285
13286 if (!state->visible && old_state->visible &&
13287 crtc_state && !needs_modeset(&crtc_state->base))
13288 intel_crtc->atomic.pre_disable_primary = true;
13289
13316 intel_crtc->atomic.fb_bits |= 13290 intel_crtc->atomic.fb_bits |=
13317 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 13291 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
13318 13292
@@ -15030,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15030 struct intel_plane_state *plane_state; 15004 struct intel_plane_state *plane_state;
15031 15005
15032 memset(crtc->config, 0, sizeof(*crtc->config)); 15006 memset(crtc->config, 0, sizeof(*crtc->config));
15007 crtc->config->base.crtc = &crtc->base;
15033 15008
15034 crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; 15009 crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
15035 15010
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..1df0e1fe235f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
93 93
94static const int skl_rates[] = { 162000, 216000, 270000, 94static const int skl_rates[] = { 162000, 216000, 270000,
95 324000, 432000, 540000 }; 95 324000, 432000, 540000 };
96static const int chv_rates[] = { 162000, 202500, 210000, 216000,
97 243000, 270000, 324000, 405000,
98 420000, 432000, 540000 };
99static const int default_rates[] = { 162000, 270000, 540000 }; 96static const int default_rates[] = { 162000, 270000, 540000 };
100 97
101/** 98/**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1169 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; 1166 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1170} 1167}
1171 1168
1169static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1170{
1171 /* WaDisableHBR2:skl */
1172 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1173 return false;
1174
1175 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1176 (INTEL_INFO(dev)->gen >= 9))
1177 return true;
1178 else
1179 return false;
1180}
1181
1172static int 1182static int
1173intel_dp_source_rates(struct drm_device *dev, const int **source_rates) 1183intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1174{ 1184{
1175 if (IS_SKYLAKE(dev)) { 1185 if (IS_SKYLAKE(dev)) {
1176 *source_rates = skl_rates; 1186 *source_rates = skl_rates;
1177 return ARRAY_SIZE(skl_rates); 1187 return ARRAY_SIZE(skl_rates);
1178 } else if (IS_CHERRYVIEW(dev)) {
1179 *source_rates = chv_rates;
1180 return ARRAY_SIZE(chv_rates);
1181 } 1188 }
1182 1189
1183 *source_rates = default_rates; 1190 *source_rates = default_rates;
1184 1191
1185 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) 1192 /* This depends on the fact that 5.4 is last value in the array */
1186 /* WaDisableHBR2:skl */ 1193 if (intel_dp_source_supports_hbr2(dev))
1187 return (DP_LINK_BW_2_7 >> 3) + 1;
1188 else if (INTEL_INFO(dev)->gen >= 8 ||
1189 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1190 return (DP_LINK_BW_5_4 >> 3) + 1; 1194 return (DP_LINK_BW_5_4 >> 3) + 1;
1191 else 1195 else
1192 return (DP_LINK_BW_2_7 >> 3) + 1; 1196 return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3941 } 3945 }
3942 } 3946 }
3943 3947
3944 /* Training Pattern 3 support, both source and sink */ 3948 /* Training Pattern 3 support, Intel platforms that support HBR2 alone
3949 * have support for TP3 hence that check is used along with dpcd check
3950 * to ensure TP3 can be enabled.
3951 * SKL < B0: due to its WaDisableHBR2 it is the only exception where TP3 is
3952 * supported but still not enabled.
3953 */
3945 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && 3954 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3946 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED && 3955 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3947 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) { 3956 intel_dp_source_supports_hbr2(dev)) {
3948 intel_dp->use_tps3 = true; 3957 intel_dp->use_tps3 = true;
3949 DRM_DEBUG_KMS("Displayport TPS3 supported\n"); 3958 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3950 } else 3959 } else
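
intel_dp_source_supports_hbr2() centralizes the platform check, and the returned value still relies on default_rates[] being sorted so that (DP_LINK_BW_x >> 3) + 1 is a count of usable entries; DPCD link-bandwidth codes are the rate in 0.27 GHz units, so 0x0a maps to two entries and 0x14 to three. A sketch of that mapping:

#include <stdio.h>

/* DPCD link-bandwidth codes are the link rate in multiples of 0.27 GHz. */
#define DP_LINK_BW_1_62	0x06
#define DP_LINK_BW_2_7	0x0a
#define DP_LINK_BW_5_4	0x14

/* Rates in kHz, sorted ascending with 5.4 GHz last; the ">> 3" trick below
 * only yields a valid entry count because of that ordering. */
static const int default_rates[] = { 162000, 270000, 540000 };

static int num_source_rates(int supports_hbr2)
{
	int bw = supports_hbr2 ? DP_LINK_BW_5_4 : DP_LINK_BW_2_7;

	return (bw >> 3) + 1;	/* 0x0a -> 2 entries, 0x14 -> 3 entries */
}

int main(void)
{
	int i, n = num_source_rates(1);

	for (i = 0; i < n; i++)
		printf("%d kHz\n", default_rates[i]);
	printf("without HBR2: %d rates usable\n", num_source_rates(0));
	return 0;
}
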
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 6e4cc5334f47..600afdbef8c9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
357 return MODE_OK; 357 return MODE_OK;
358} 358}
359 359
360static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
361 struct drm_connector_state *state)
362{
363 struct intel_connector *intel_connector = to_intel_connector(connector);
364 struct intel_dp *intel_dp = intel_connector->mst_port;
365 struct intel_crtc *crtc = to_intel_crtc(state->crtc);
366
367 return &intel_dp->mst_encoders[crtc->pipe]->base.base;
368}
369
360static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) 370static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
361{ 371{
362 struct intel_connector *intel_connector = to_intel_connector(connector); 372 struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
367static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { 377static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
368 .get_modes = intel_dp_mst_get_modes, 378 .get_modes = intel_dp_mst_get_modes,
369 .mode_valid = intel_dp_mst_mode_valid, 379 .mode_valid = intel_dp_mst_mode_valid,
380 .atomic_best_encoder = intel_mst_atomic_best_encoder,
370 .best_encoder = intel_mst_best_encoder, 381 .best_encoder = intel_mst_best_encoder,
371}; 382};
372 383
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..7f2161a1ff5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
1012 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); 1012 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
1013 if (ret) 1013 if (ret)
1014 goto unpin_ctx_obj; 1014 goto unpin_ctx_obj;
1015
1016 ctx_obj->dirty = true;
1015 } 1017 }
1016 1018
1017 return ret; 1019 return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e539314ae87e..4be66f60504d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -275,6 +275,13 @@ struct intel_engine_cs {
275 * Do we have some not yet emitted requests outstanding? 275 * Do we have some not yet emitted requests outstanding?
276 */ 276 */
277 struct drm_i915_gem_request *outstanding_lazy_request; 277 struct drm_i915_gem_request *outstanding_lazy_request;
278 /**
279 * Seqno of request most recently submitted to request_list.
280 * Used exclusively by hang checker to avoid grabbing lock while
281 * inspecting request list.
282 */
283 u32 last_submitted_seqno;
284
278 bool gpu_caches_dirty; 285 bool gpu_caches_dirty;
279 286
280 wait_queue_head_t irq_queue; 287 wait_queue_head_t irq_queue;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index a6d8a3ee7750..260389acfb77 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1274 struct drm_i915_private *dev_priv = dev->dev_private; 1274 struct drm_i915_private *dev_priv = dev->dev_private;
1275 struct drm_i915_reg_read *reg = data; 1275 struct drm_i915_reg_read *reg = data;
1276 struct register_whitelist const *entry = whitelist; 1276 struct register_whitelist const *entry = whitelist;
1277 unsigned size;
1278 u64 offset;
1277 int i, ret = 0; 1279 int i, ret = 0;
1278 1280
1279 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1281 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1280 if (entry->offset == reg->offset && 1282 if (entry->offset == (reg->offset & -entry->size) &&
1281 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1283 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1282 break; 1284 break;
1283 } 1285 }
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1285 if (i == ARRAY_SIZE(whitelist)) 1287 if (i == ARRAY_SIZE(whitelist))
1286 return -EINVAL; 1288 return -EINVAL;
1287 1289
1290 /* We use the low bits to encode extra flags as the register should
1291 * be naturally aligned (and those that are not so aligned merely
1292 * limit the available flags for that register).
1293 */
1294 offset = entry->offset;
1295 size = entry->size;
1296 size |= reg->offset ^ offset;
1297
1288 intel_runtime_pm_get(dev_priv); 1298 intel_runtime_pm_get(dev_priv);
1289 1299
1290 switch (entry->size) { 1300 switch (size) {
1301 case 8 | 1:
1302 reg->val = I915_READ64_2x32(offset, offset+4);
1303 break;
1291 case 8: 1304 case 8:
1292 reg->val = I915_READ64(reg->offset); 1305 reg->val = I915_READ64(offset);
1293 break; 1306 break;
1294 case 4: 1307 case 4:
1295 reg->val = I915_READ(reg->offset); 1308 reg->val = I915_READ(offset);
1296 break; 1309 break;
1297 case 2: 1310 case 2:
1298 reg->val = I915_READ16(reg->offset); 1311 reg->val = I915_READ16(offset);
1299 break; 1312 break;
1300 case 1: 1313 case 1:
1301 reg->val = I915_READ8(reg->offset); 1314 reg->val = I915_READ8(offset);
1302 break; 1315 break;
1303 default: 1316 default:
1304 MISSING_CASE(entry->size);
1305 ret = -EINVAL; 1317 ret = -EINVAL;
1306 goto out; 1318 goto out;
1307 } 1319 }
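Editor's note: the comment in the hunk above explains that the low bits of a whitelisted register offset are reused as flag bits because the register itself must be naturally aligned. Below is a minimal stand-alone sketch of that folding, assuming a hypothetical helper name (reg_read_key) and an arbitrary 8-byte-aligned example offset; in the driver the same computation is done inline on entry->size and reg->offset.

#include <assert.h>
#include <stdint.h>

static unsigned reg_read_key(uint32_t entry_offset, unsigned entry_size,
                             uint32_t user_offset)
{
        unsigned size = entry_size;

        /* entry_offset is naturally aligned, so any low bits set by the
         * caller survive the XOR and become flag bits of the switch key */
        size |= user_offset ^ entry_offset;
        return size;
}

int main(void)
{
        assert(reg_read_key(0x2358, 8, 0x2358) == 8);            /* plain 64-bit read */
        assert(reg_read_key(0x2358, 8, 0x2358 | 1) == (8 | 1));  /* 2x32 read path    */
        return 0;
}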
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 214eceefc981..e671ad369416 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
301 301
302 switch (tve->mode) { 302 switch (tve->mode) {
303 case TVE_MODE_VGA: 303 case TVE_MODE_VGA:
304 imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24, 304 imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
305 tve->hsync_pin, tve->vsync_pin); 305 tve->hsync_pin, tve->vsync_pin);
306 break; 306 break;
307 case TVE_MODE_TVOUT: 307 case TVE_MODE_TVOUT:
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 74a9ce40ddc4..b4deb9cf9d71 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -21,6 +21,7 @@
21#include <drm/drm_panel.h> 21#include <drm/drm_panel.h>
22#include <linux/videodev2.h> 22#include <linux/videodev2.h>
23#include <video/of_display_timing.h> 23#include <video/of_display_timing.h>
24#include <linux/of_graph.h>
24 25
25#include "imx-drm.h" 26#include "imx-drm.h"
26 27
@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
208{ 209{
209 struct drm_device *drm = data; 210 struct drm_device *drm = data;
210 struct device_node *np = dev->of_node; 211 struct device_node *np = dev->of_node;
211 struct device_node *panel_node; 212 struct device_node *port;
212 const u8 *edidp; 213 const u8 *edidp;
213 struct imx_parallel_display *imxpd; 214 struct imx_parallel_display *imxpd;
214 int ret; 215 int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
234 imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; 235 imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
235 } 236 }
236 237
237 panel_node = of_parse_phandle(np, "fsl,panel", 0); 238 /* port@1 is the output port */
238 if (panel_node) { 239 port = of_graph_get_port_by_id(np, 1);
239 imxpd->panel = of_drm_find_panel(panel_node); 240 if (port) {
240 if (!imxpd->panel) 241 struct device_node *endpoint, *remote;
241 return -EPROBE_DEFER; 242
243 endpoint = of_get_child_by_name(port, "endpoint");
244 if (endpoint) {
245 remote = of_graph_get_remote_port_parent(endpoint);
246 if (remote)
247 imxpd->panel = of_drm_find_panel(remote);
248 if (!imxpd->panel)
249 return -EPROBE_DEFER;
250 }
242 } 251 }
243 252
244 imxpd->dev = dev; 253 imxpd->dev = dev;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
220 uint32_t op_mode = 0; 220 uint32_t op_mode = 0;
221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; 222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
223 enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); 223 enum mdp4_frame_format frame_type;
224 224
225 if (!(crtc && fb)) { 225 if (!(crtc && fb)) {
226 DBG("%s: disabled!", mdp4_plane->name); 226 DBG("%s: disabled!", mdp4_plane->name);
227 return 0; 227 return 0;
228 } 228 }
229 229
230 frame_type = mdp4_get_frame_format(fb);
231
230 /* src values are in Q16 fixed point, convert to integer: */ 232 /* src values are in Q16 fixed point, convert to integer: */
231 src_x = src_x >> 16; 233 src_x = src_x >> 16;
232 src_y = src_y >> 16; 234 src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
76 76
77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
78{ 78{
79 int i;
79 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
81 int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
82
83 for (i = 0; i < nplanes; i++) {
84 struct drm_plane *plane = state->planes[i];
85 struct drm_plane_state *plane_state = state->plane_states[i];
86
87 if (!plane)
88 continue;
89
90 mdp5_plane_complete_commit(plane, plane_state);
91 }
92
80 mdp5_disable(mdp5_kms); 93 mdp5_disable(mdp5_kms);
81} 94}
82 95
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
227 struct drm_mode_object *obj); 227 struct drm_mode_object *obj);
228uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 228uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
229void mdp5_plane_complete_flip(struct drm_plane *plane); 229void mdp5_plane_complete_flip(struct drm_plane *plane);
230void mdp5_plane_complete_commit(struct drm_plane *plane,
231 struct drm_plane_state *state);
230enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 232enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
231struct drm_plane *mdp5_plane_init(struct drm_device *dev, 233struct drm_plane *mdp5_plane_init(struct drm_device *dev,
232 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); 234 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {
31 31
32 uint32_t nformats; 32 uint32_t nformats;
33 uint32_t formats[32]; 33 uint32_t formats[32];
34
35 bool enabled;
36}; 34};
37#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 35#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
38 36
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
56 return state->fb && state->crtc; 54 return state->fb && state->crtc;
57} 55}
58 56
59static int mdp5_plane_disable(struct drm_plane *plane)
60{
61 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
62 struct mdp5_kms *mdp5_kms = get_kms(plane);
63 enum mdp5_pipe pipe = mdp5_plane->pipe;
64
65 DBG("%s: disable", mdp5_plane->name);
66
67 if (mdp5_kms) {
68 /* Release the memory we requested earlier from the SMP: */
69 mdp5_smp_release(mdp5_kms->smp, pipe);
70 }
71
72 return 0;
73}
74
75static void mdp5_plane_destroy(struct drm_plane *plane) 57static void mdp5_plane_destroy(struct drm_plane *plane)
76{ 58{
77 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 59 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
224 206
225 if (!plane_enabled(state)) { 207 if (!plane_enabled(state)) {
226 to_mdp5_plane_state(state)->pending = true; 208 to_mdp5_plane_state(state)->pending = true;
227 mdp5_plane_disable(plane);
228 } else if (to_mdp5_plane_state(state)->mode_changed) { 209 } else if (to_mdp5_plane_state(state)->mode_changed) {
229 int ret; 210 int ret;
230 to_mdp5_plane_state(state)->pending = true; 211 to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
602 return mdp5_plane->flush_mask; 583 return mdp5_plane->flush_mask;
603} 584}
604 585
586/* called after vsync in thread context */
587void mdp5_plane_complete_commit(struct drm_plane *plane,
588 struct drm_plane_state *state)
589{
590 struct mdp5_kms *mdp5_kms = get_kms(plane);
591 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
592 enum mdp5_pipe pipe = mdp5_plane->pipe;
593
594 if (!plane_enabled(plane->state)) {
595 DBG("%s: free SMP", mdp5_plane->name);
596 mdp5_smp_release(mdp5_kms->smp, pipe);
597 }
598}
599
605/* initialize plane */ 600/* initialize plane */
606struct drm_plane *mdp5_plane_init(struct drm_device *dev, 601struct drm_plane *mdp5_plane_init(struct drm_device *dev,
607 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) 602 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). 34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
35 * 35 *
36 * For each block that can be dynamically allocated, it can be either 36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps: 37 * free:
38 * The block is free.
39 *
40 * pending:
41 * The block is allocated to some client and not free.
42 *
43 * configured:
44 * The block is allocated to some client, and assigned to that
45 * client in MDP5_MDP_SMP_ALLOC registers.
46 *
47 * inuse:
48 * The block is being actively used by a client.
49 *
50 * The updates happen in the following steps:
38 * 51 *
39 * 1) mdp5_smp_request(): 52 * 1) mdp5_smp_request():
40 * When plane scanout is setup, calculate required number of 53 * When plane scanout is setup, calculate required number of
41 * blocks needed per client, and request. Blocks not inuse or 54 * blocks needed per client, and request. Blocks neither inuse nor
42 * pending by any other client are added to client's pending 55 * configured nor pending by any other client are added to client's
43 * set. 56 * pending set.
57 * For shrinking, blocks in pending but not in configured can be freed
58 * directly, but those already in configured will be freed later by
59 * mdp5_smp_commit.
44 * 60 *
45 * 2) mdp5_smp_configure(): 61 * 2) mdp5_smp_configure():
46 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
47 * are configured for the union(pending, inuse) 63 * are configured for the union(pending, inuse)
64 * Current pending is copied to configured.
65 * It is assumed that mdp5_smp_request and mdp5_smp_configure do not run
66 * concurrently for the same pipe.
48 * 67 *
49 * 3) mdp5_smp_commit(): 68 * 3) mdp5_smp_commit():
50 * After next vblank, copy pending -> inuse. Optionally update 69 * After next vblank, copy configured -> inuse. Optionally update
51 * MDP5_SMP_ALLOC registers if there are newly unused blocks 70 * MDP5_SMP_ALLOC registers if there are newly unused blocks
52 * 71 *
72 * 4) mdp5_smp_release():
73 * Must be called after the pipe is disabled and no longer uses any SMB
74 *
53 * On the next vblank after changes have been committed to hw, the 75 * On the next vblank after changes have been committed to hw, the
54 * client's pending blocks become it's in-use blocks (and no-longer 76 * client's pending blocks become it's in-use blocks (and no-longer
55 * in-use blocks become available to other clients). 77 * in-use blocks become available to other clients).
@@ -77,6 +99,9 @@ struct mdp5_smp {
77 struct mdp5_client_smp_state client_state[MAX_CLIENTS]; 99 struct mdp5_client_smp_state client_state[MAX_CLIENTS];
78}; 100};
79 101
102static void update_smp_state(struct mdp5_smp *smp,
103 u32 cid, mdp5_smp_state_t *assigned);
104
80static inline 105static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp) 106struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{ 107{
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
149 for (i = cur_nblks; i > nblks; i--) { 174 for (i = cur_nblks; i > nblks; i--) {
150 int blk = find_first_bit(ps->pending, cnt); 175 int blk = find_first_bit(ps->pending, cnt);
151 clear_bit(blk, ps->pending); 176 clear_bit(blk, ps->pending);
152 /* don't clear in global smp_state until _commit() */ 177
178 /* clear from global smp_state now if the block is not yet in
179 * configured; otherwise it is cleared at _commit()
180 */
181 if (!test_bit(blk, ps->configured))
182 clear_bit(blk, smp->state);
153 } 183 }
154 } 184 }
155 185
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
223/* Release SMP blocks for all clients of the pipe */ 253/* Release SMP blocks for all clients of the pipe */
224void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) 254void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
225{ 255{
226 int i, nblks; 256 int i;
257 unsigned long flags;
258 int cnt = smp->blk_cnt;
259
260 for (i = 0; i < pipe2nclients(pipe); i++) {
261 mdp5_smp_state_t assigned;
262 u32 cid = pipe2client(pipe, i);
263 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
264
265 spin_lock_irqsave(&smp->state_lock, flags);
266
267 /* clear hw assignment */
268 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
269 update_smp_state(smp, CID_UNUSED, &assigned);
270
271 /* free to global pool */
272 bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
273 bitmap_andnot(smp->state, smp->state, assigned, cnt);
274
275 /* clear client's info */
276 bitmap_zero(ps->pending, cnt);
277 bitmap_zero(ps->configured, cnt);
278 bitmap_zero(ps->inuse, cnt);
279
280 spin_unlock_irqrestore(&smp->state_lock, flags);
281 }
227 282
228 for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
229 smp_request_block(smp, pipe2client(pipe, i), 0);
230 set_fifo_thresholds(smp, pipe, 0); 283 set_fifo_thresholds(smp, pipe, 0);
231} 284}
232 285
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
274 u32 cid = pipe2client(pipe, i); 327 u32 cid = pipe2client(pipe, i);
275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 328 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
276 329
277 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 330 /*
331 * if vblank has not happened since last smp_configure
332 * skip the configure for now
333 */
334 if (!bitmap_equal(ps->inuse, ps->configured, cnt))
335 continue;
336
337 bitmap_copy(ps->configured, ps->pending, cnt);
338 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
278 update_smp_state(smp, cid, &assigned); 339 update_smp_state(smp, cid, &assigned);
279 } 340 }
280} 341}
281 342
282/* step #3: after vblank, copy pending -> inuse: */ 343/* step #3: after vblank, copy configured -> inuse: */
283void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) 344void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
284{ 345{
285 int cnt = smp->blk_cnt; 346 int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
295 * using, which can be released and made available to other 356 * using, which can be released and made available to other
296 * clients: 357 * clients:
297 */ 358 */
298 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { 359 if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
299 unsigned long flags; 360 unsigned long flags;
300 361
301 spin_lock_irqsave(&smp->state_lock, flags); 362 spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
306 update_smp_state(smp, CID_UNUSED, &released); 367 update_smp_state(smp, CID_UNUSED, &released);
307 } 368 }
308 369
309 bitmap_copy(ps->inuse, ps->pending, cnt); 370 bitmap_copy(ps->inuse, ps->configured, cnt);
310 } 371 }
311} 372}
312 373
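Editor's note: the free/pending/configured/inuse description added at the top of this file can be modelled in a few lines of user-space C. This is a sketch under simplifying assumptions (plain bitmasks instead of mdp5_smp_state_t, a single client, request()/release() omitted), showing only the configure (pending -> configured) and commit (configured -> inuse, releasing what dropped out) steps.

#include <stdio.h>

struct client_state {
        unsigned long pending;     /* set up by mdp5_smp_request()        */
        unsigned long configured;  /* programmed by mdp5_smp_configure()  */
        unsigned long inuse;       /* taken over after mdp5_smp_commit()  */
};

static void configure(struct client_state *ps)
{
        /* step 2: hw ALLOC registers written, pending becomes configured */
        ps->configured = ps->pending;
}

static void commit(struct client_state *ps)
{
        /* step 3: after vblank, blocks no longer configured are released
         * back to the pool and configured becomes inuse */
        unsigned long released = ps->inuse & ~ps->configured;

        if (released)
                printf("released blocks: 0x%lx\n", released);
        ps->inuse = ps->configured;
}

int main(void)
{
        struct client_state ps = { .pending = 0x0f, .inuse = 0x3f };

        configure(&ps);
        commit(&ps);    /* prints "released blocks: 0x30" */
        return 0;
}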
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
23 23
24struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
25 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
26 mdp5_smp_state_t configured;
26 mdp5_smp_state_t pending; 27 mdp5_smp_state_t pending;
27}; 28};
28 29
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
283 283
284 timeout = ktime_add_ms(ktime_get(), 1000); 284 timeout = ktime_add_ms(ktime_get(), 1000);
285 285
286 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); 286 /* uninterruptible wait */
287 if (ret) { 287 msm_wait_fence(dev, c->fence, &timeout, false);
288 WARN_ON(ret); // TODO unswap state back? or??
289 commit_destroy(c);
290 return ret;
291 }
292 288
293 complete_commit(c); 289 complete_commit(c);
294 290
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
637 * Fences: 637 * Fences:
638 */ 638 */
639 639
640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 640int msm_wait_fence(struct drm_device *dev, uint32_t fence,
641 ktime_t *timeout) 641 ktime_t *timeout, bool interruptible)
642{ 642{
643 struct msm_drm_private *priv = dev->dev_private; 643 struct msm_drm_private *priv = dev->dev_private;
644 int ret; 644 int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
667 remaining_jiffies = timespec_to_jiffies(&ts); 667 remaining_jiffies = timespec_to_jiffies(&ts);
668 } 668 }
669 669
670 ret = wait_event_interruptible_timeout(priv->fence_event, 670 if (interruptible)
671 ret = wait_event_interruptible_timeout(priv->fence_event,
672 fence_completed(dev, fence),
673 remaining_jiffies);
674 else
675 ret = wait_event_timeout(priv->fence_event,
671 fence_completed(dev, fence), 676 fence_completed(dev, fence),
672 remaining_jiffies); 677 remaining_jiffies);
673 678
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
853 return -EINVAL; 858 return -EINVAL;
854 } 859 }
855 860
856 return msm_wait_fence_interruptable(dev, args->fence, &timeout); 861 return msm_wait_fence(dev, args->fence, &timeout, true);
857} 862}
858 863
859static const struct drm_ioctl_desc msm_ioctls[] = { 864static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
164 164
165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
166 166
167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 167int msm_wait_fence(struct drm_device *dev, uint32_t fence,
168 ktime_t *timeout); 168 ktime_t *timeout, bool interruptible);
169int msm_queue_fence_cb(struct drm_device *dev, 169int msm_queue_fence_cb(struct drm_device *dev,
170 struct msm_fence_cb *cb, uint32_t fence); 170 struct msm_fence_cb *cb, uint32_t fence);
171void msm_update_fence(struct drm_device *dev, uint32_t fence); 171void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
460 if (op & MSM_PREP_NOSYNC) 460 if (op & MSM_PREP_NOSYNC)
461 timeout = NULL; 461 timeout = NULL;
462 462
463 ret = msm_wait_fence_interruptable(dev, fence, timeout); 463 ret = msm_wait_fence(dev, fence, timeout, true);
464 } 464 }
465 465
466 /* TODO cache maintenance */ 466 /* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) 23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
24{ 24{
25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 25 struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 BUG_ON(!msm_obj->sgt); /* should have already pinned! */ 26 int npages = obj->size >> PAGE_SHIFT;
27 return msm_obj->sgt; 27
28 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
29 return NULL;
30
31 return drm_prime_pages_to_sg(msm_obj->pages, npages);
28} 32}
29 33
30void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 649024d4daf1..477cbb12809b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); 128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
129 nvif_client_fini(&cli->base); 129 nvif_client_fini(&cli->base);
130 usif_client_fini(cli); 130 usif_client_fini(cli);
131 kfree(cli);
131} 132}
132 133
133static void 134static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
865 866
866 pm_runtime_get_sync(dev->dev); 867 pm_runtime_get_sync(dev->dev);
867 868
869 mutex_lock(&cli->mutex);
868 if (cli->abi16) 870 if (cli->abi16)
869 nouveau_abi16_fini(cli->abi16); 871 nouveau_abi16_fini(cli->abi16);
872 mutex_unlock(&cli->mutex);
870 873
871 mutex_lock(&drm->client.mutex); 874 mutex_lock(&drm->client.mutex);
872 list_del(&cli->head); 875 list_del(&cli->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
92 return 0; 92 return 0;
93} 93}
94 94
95#if IS_ENABLED(CONFIG_IOMMU_API)
96
95static void nouveau_platform_probe_iommu(struct device *dev, 97static void nouveau_platform_probe_iommu(struct device *dev,
96 struct nouveau_platform_gpu *gpu) 98 struct nouveau_platform_gpu *gpu)
97{ 99{
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
158 } 160 }
159} 161}
160 162
163#else
164
165static void nouveau_platform_probe_iommu(struct device *dev,
166 struct nouveau_platform_gpu *gpu)
167{
168}
169
170static void nouveau_platform_remove_iommu(struct device *dev,
171 struct nouveau_platform_gpu *gpu)
172{
173}
174
175#endif
176
161static int nouveau_platform_probe(struct platform_device *pdev) 177static int nouveau_platform_probe(struct platform_device *pdev)
162{ 178{
163 struct nouveau_platform_gpu *gpu; 179 struct nouveau_platform_gpu *gpu;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 18f449715788..7464aef34674 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
175 node->page_shift = 12; 175 node->page_shift = 12;
176 176
177 switch (drm->device.info.family) { 177 switch (drm->device.info.family) {
178 case NV_DEVICE_INFO_V0_TNT:
179 case NV_DEVICE_INFO_V0_CELSIUS:
180 case NV_DEVICE_INFO_V0_KELVIN:
181 case NV_DEVICE_INFO_V0_RANKINE:
182 case NV_DEVICE_INFO_V0_CURIE:
183 break;
178 case NV_DEVICE_INFO_V0_TESLA: 184 case NV_DEVICE_INFO_V0_TESLA:
179 if (drm->device.info.chipset != 0x50) 185 if (drm->device.info.chipset != 0x50)
180 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; 186 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
181 break; 187 break;
182 case NV_DEVICE_INFO_V0_FERMI: 188 case NV_DEVICE_INFO_V0_FERMI:
183 case NV_DEVICE_INFO_V0_KEPLER: 189 case NV_DEVICE_INFO_V0_KEPLER:
190 case NV_DEVICE_INFO_V0_MAXWELL:
184 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; 191 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
185 break; 192 break;
186 default: 193 default:
194 NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
195 drm->device.info.family);
187 break; 196 break;
188 } 197 }
189 198
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
203 if (ret) 203 if (ret)
204 return ret; 204 return ret;
205 205
206 if (RING_SPACE(chan, 49)) { 206 if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
207 nouveau_fbcon_gpu_lockup(info); 207 nouveau_fbcon_gpu_lockup(info);
208 return 0; 208 return 0;
209 } 209 }
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
979{ 979{
980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
981 981
982 if (show && nv_crtc->cursor.nvbo) 982 if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
983 nv50_crtc_cursor_show(nv_crtc); 983 nv50_crtc_cursor_show(nv_crtc);
984 else 984 else
985 nv50_crtc_cursor_hide(nv_crtc); 985 nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
188 if (ret) 188 if (ret)
189 return ret; 189 return ret;
190 190
191 ret = RING_SPACE(chan, 59); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 nouveau_fbcon_gpu_lockup(info); 193 nouveau_fbcon_gpu_lockup(info);
194 return ret; 194 return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
252 OUT_RING(chan, info->var.yres_virtual); 252 OUT_RING(chan, info->var.yres_virtual);
253 OUT_RING(chan, upper_32_bits(fb->vma.offset)); 253 OUT_RING(chan, upper_32_bits(fb->vma.offset));
254 OUT_RING(chan, lower_32_bits(fb->vma.offset)); 254 OUT_RING(chan, lower_32_bits(fb->vma.offset));
255 FIRE_RING(chan);
255 256
256 return 0; 257 return 0;
257} 258}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
188 return -EINVAL; 188 return -EINVAL;
189 } 189 }
190 190
191 ret = RING_SPACE(chan, 60); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 WARN_ON(1); 193 WARN_ON(1);
194 nouveau_fbcon_gpu_lockup(info); 194 nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; 809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
810 default: 810 default:
811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); 811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
812 return 0x0000; 812 return NULL;
813 } 813 }
814 } 814 }
815 815
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
663 gf100_gr_zbc_clear_depth(priv, index); 663 gf100_gr_zbc_clear_depth(priv, index);
664} 664}
665 665
666/**
667 * Wait until GR goes idle. GR is considered idle if it is disabled by the
668 * MC (0x200) register, or GR is not busy and a context switch is not in
669 * progress.
670 */
671int
672gf100_gr_wait_idle(struct gf100_gr_priv *priv)
673{
674 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
675 bool gr_enabled, ctxsw_active, gr_busy;
676
677 do {
678 /*
679 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
680 * up-to-date
681 */
682 nv_rd32(priv, 0x400700);
683
684 gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
685 ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
686 gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
687
688 if (!gr_enabled || (!gr_busy && !ctxsw_active))
689 return 0;
690 } while (time_before(jiffies, end_jiffies));
691
692 nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
693 gr_enabled, ctxsw_active, gr_busy);
694 return -EAGAIN;
695}
696
666void 697void
667gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 698gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
668{ 699{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
699 730
700 while (addr < next) { 731 while (addr < next) {
701 nv_wr32(priv, 0x400200, addr); 732 nv_wr32(priv, 0x400200, addr);
702 nv_wait(priv, 0x400700, 0x00000002, 0x00000000); 733 /**
734 * Wait for GR to go idle after submitting a
735 * GO_IDLE bundle
736 */
737 if ((addr & 0xffff) == 0xe100)
738 gf100_gr_wait_idle(priv);
739 nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
703 addr += init->pitch; 740 addr += init->pitch;
704 } 741 }
705 } 742 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
181 int ppc_nr; 181 int ppc_nr;
182}; 182};
183 183
184int gf100_gr_wait_idle(struct gf100_gr_priv *);
184void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *); 185void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
185void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 186void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
186void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 187void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
332nvkm_perfctx_dtor(struct nvkm_object *object) 332nvkm_perfctx_dtor(struct nvkm_object *object)
333{ 333{
334 struct nvkm_pm *ppm = (void *)object->engine; 334 struct nvkm_pm *ppm = (void *)object->engine;
335 struct nvkm_perfctx *ctx = (void *)object;
336
335 mutex_lock(&nv_subdev(ppm)->mutex); 337 mutex_lock(&nv_subdev(ppm)->mutex);
336 nvkm_engctx_destroy(&ppm->context->base); 338 nvkm_engctx_destroy(&ctx->base);
337 ppm->context = NULL; 339 if (ppm->context == ctx)
340 ppm->context = NULL;
338 mutex_unlock(&nv_subdev(ppm)->mutex); 341 mutex_unlock(&nv_subdev(ppm)->mutex);
339} 342}
340 343
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
355 mutex_lock(&nv_subdev(ppm)->mutex); 358 mutex_lock(&nv_subdev(ppm)->mutex);
356 if (ppm->context == NULL) 359 if (ppm->context == NULL)
357 ppm->context = ctx; 360 ppm->context = ctx;
358 mutex_unlock(&nv_subdev(ppm)->mutex);
359
360 if (ctx != ppm->context) 361 if (ctx != ppm->context)
361 return -EBUSY; 362 ret = -EBUSY;
363 mutex_unlock(&nv_subdev(ppm)->mutex);
362 364
363 return 0; 365 return ret;
364} 366}
365 367
366struct nvkm_oclass 368struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
1285} 1285}
1286 1286
1287/** 1287/**
1288 * INIT_PLL_INDIRECT - opcode 0x59
1289 *
1290 */
1291static void
1292init_pll_indirect(struct nvbios_init *init)
1293{
1294 struct nvkm_bios *bios = init->bios;
1295 u32 reg = nv_ro32(bios, init->offset + 1);
1296 u16 addr = nv_ro16(bios, init->offset + 5);
1297 u32 freq = (u32)nv_ro16(bios, addr) * 1000;
1298
1299 trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
1300 reg, addr, freq);
1301 init->offset += 7;
1302
1303 init_prog_pll(init, reg, freq);
1304}
1305
1306/**
1307 * INIT_ZM_REG_INDIRECT - opcode 0x5a
1308 *
1309 */
1310static void
1311init_zm_reg_indirect(struct nvbios_init *init)
1312{
1313 struct nvkm_bios *bios = init->bios;
1314 u32 reg = nv_ro32(bios, init->offset + 1);
1315 u16 addr = nv_ro16(bios, init->offset + 5);
1316 u32 data = nv_ro32(bios, addr);
1317
1318 trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
1319 reg, addr, data);
1320 init->offset += 7;
1321
1322 init_wr32(init, reg, data);
1323}
1324
1325/**
1288 * INIT_SUB_DIRECT - opcode 0x5b 1326 * INIT_SUB_DIRECT - opcode 0x5b
1289 * 1327 *
1290 */ 1328 */
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
2145 [0x56] = { init_condition_time }, 2183 [0x56] = { init_condition_time },
2146 [0x57] = { init_ltime }, 2184 [0x57] = { init_ltime },
2147 [0x58] = { init_zm_reg_sequence }, 2185 [0x58] = { init_zm_reg_sequence },
2186 [0x59] = { init_pll_indirect },
2187 [0x5a] = { init_zm_reg_indirect },
2148 [0x5b] = { init_sub_direct }, 2188 [0x5b] = { init_sub_direct },
2149 [0x5c] = { init_jump }, 2189 [0x5c] = { init_jump },
2150 [0x5e] = { init_i2c_if }, 2190 [0x5e] = { init_i2c_if },
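Editor's note: both new opcodes consume the same 7-byte encoding (opcode byte, 32-bit register, 16-bit pointer into the VBIOS image), read little-endian as the bios accessors nv_ro32()/nv_ro16() do, and then advance init->offset by 7. Below is a stand-alone decoding sketch using a made-up instruction; the register and pointer values are illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint32_t ro32(const uint8_t *p)
{
        return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint16_t ro16(const uint8_t *p)
{
        return p[0] | p[1] << 8;
}

int main(void)
{
        /* hypothetical INIT_ZM_REG_INDIRECT (0x5a) instruction */
        const uint8_t op[7] = { 0x5a, 0x00, 0x16, 0x61, 0x00, 0x34, 0x12 };
        uint32_t reg  = ro32(op + 1);   /* 0x00611600 */
        uint16_t addr = ro16(op + 5);   /* 0x1234     */

        printf("R[0x%06x] = VBIOS[0x%04x]\n", reg, addr);
        return 0;       /* the script pointer then advances by 7 bytes */
}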
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
180 struct gt215_clk_info *info) 180 struct gt215_clk_info *info)
181{ 181{
182 struct gt215_clk_priv *priv = (void *)clock; 182 struct gt215_clk_priv *priv = (void *)clock;
183 u32 oclk, sclk, sdiv, diff; 183 u32 oclk, sclk, sdiv;
184 s32 diff;
184 185
185 info->clk = 0; 186 info->clk = 0;
186 187
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
38 nv_wr32(priv, 0x12004c, 0x4); 38 nv_wr32(priv, 0x12004c, 0x4);
39 nv_wr32(priv, 0x122204, 0x2); 39 nv_wr32(priv, 0x122204, 0x2);
40 nv_rd32(priv, 0x122204); 40 nv_rd32(priv, 0x122204);
41
42 /*
43 * Bug: increase clock timeout to avoid operation failure at high
44 * gpcclk rate.
45 */
46 nv_wr32(priv, 0x122354, 0x800);
47 nv_wr32(priv, 0x128328, 0x800);
48 nv_wr32(priv, 0x124320, 0x800);
41} 49}
42 50
43static void 51static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
50{ 50{
51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
52 struct nv04_instobj_priv *node = (void *)object; 52 struct nv04_instobj_priv *node = (void *)object;
53 struct nvkm_subdev *subdev = (void *)priv;
54
55 mutex_lock(&subdev->mutex);
53 nvkm_mm_free(&priv->heap, &node->mem); 56 nvkm_mm_free(&priv->heap, &node->mem);
57 mutex_unlock(&subdev->mutex);
58
54 nvkm_instobj_destroy(&node->base); 59 nvkm_instobj_destroy(&node->base);
55} 60}
56 61
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
62 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); 67 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
63 struct nv04_instobj_priv *node; 68 struct nv04_instobj_priv *node;
64 struct nvkm_instobj_args *args = data; 69 struct nvkm_instobj_args *args = data;
70 struct nvkm_subdev *subdev = (void *)priv;
65 int ret; 71 int ret;
66 72
67 if (!args->align) 73 if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
72 if (ret) 78 if (ret)
73 return ret; 79 return ret;
74 80
81 mutex_lock(&subdev->mutex);
75 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, 82 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
76 args->align, &node->mem); 83 args->align, &node->mem);
84 mutex_unlock(&subdev->mutex);
77 if (ret) 85 if (ret)
78 return ret; 86 return ret;
79 87
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8c3d96..7841970de48d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
285 285
286 if (wait) { 286 if (wait) {
287 if (!wait_for_completion_timeout(&engine->compl, 287 if (!wait_for_completion_timeout(&engine->compl,
288 msecs_to_jiffies(1))) { 288 msecs_to_jiffies(100))) {
289 dev_err(dmm->dev, "timed out waiting for done\n"); 289 dev_err(dmm->dev, "timed out waiting for done\n");
290 ret = -ETIMEDOUT; 290 ret = -ETIMEDOUT;
291 } 291 }
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41f216f..12081e61d45a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
177 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 177 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
178struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); 178struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
179int omap_framebuffer_pin(struct drm_framebuffer *fb); 179int omap_framebuffer_pin(struct drm_framebuffer *fb);
180int omap_framebuffer_unpin(struct drm_framebuffer *fb); 180void omap_framebuffer_unpin(struct drm_framebuffer *fb);
181void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 181void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
182 struct omap_drm_window *win, struct omap_overlay_info *info); 182 struct omap_drm_window *win, struct omap_overlay_info *info);
183struct drm_connector *omap_framebuffer_get_next_connector( 183struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
211 enum dma_data_direction dir); 211 enum dma_data_direction dir);
212int omap_gem_get_paddr(struct drm_gem_object *obj, 212int omap_gem_get_paddr(struct drm_gem_object *obj,
213 dma_addr_t *paddr, bool remap); 213 dma_addr_t *paddr, bool remap);
214int omap_gem_put_paddr(struct drm_gem_object *obj); 214void omap_gem_put_paddr(struct drm_gem_object *obj);
215int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages, 215int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
216 bool remap); 216 bool remap);
217int omap_gem_put_pages(struct drm_gem_object *obj); 217int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
236 /* PVR needs alignment to 8 pixels.. right now that is the most 236 /* PVR needs alignment to 8 pixels.. right now that is the most
237 * restrictive stride requirement.. 237 * restrictive stride requirement..
238 */ 238 */
239 return ALIGN(pitch, 8 * bytespp); 239 return roundup(pitch, 8 * bytespp);
240} 240}
241 241
242/* map crtc to vblank mask */ 242/* map crtc to vblank mask */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e76df1a..51b1219af87f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@ fail:
287} 287}
288 288
289/* unpin, no longer being scanned out: */ 289/* unpin, no longer being scanned out: */
290int omap_framebuffer_unpin(struct drm_framebuffer *fb) 290void omap_framebuffer_unpin(struct drm_framebuffer *fb)
291{ 291{
292 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); 292 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
293 int ret, i, n = drm_format_num_planes(fb->pixel_format); 293 int i, n = drm_format_num_planes(fb->pixel_format);
294 294
295 mutex_lock(&omap_fb->lock); 295 mutex_lock(&omap_fb->lock);
296 296
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
298 298
299 if (omap_fb->pin_count > 0) { 299 if (omap_fb->pin_count > 0) {
300 mutex_unlock(&omap_fb->lock); 300 mutex_unlock(&omap_fb->lock);
301 return 0; 301 return;
302 } 302 }
303 303
304 for (i = 0; i < n; i++) { 304 for (i = 0; i < n; i++) {
305 struct plane *plane = &omap_fb->planes[i]; 305 struct plane *plane = &omap_fb->planes[i];
306 ret = omap_gem_put_paddr(plane->bo); 306 omap_gem_put_paddr(plane->bo);
307 if (ret)
308 goto fail;
309 plane->paddr = 0; 307 plane->paddr = 0;
310 } 308 }
311 309
312 mutex_unlock(&omap_fb->lock); 310 mutex_unlock(&omap_fb->lock);
313
314 return 0;
315
316fail:
317 mutex_unlock(&omap_fb->lock);
318 return ret;
319} 311}
320 312
321struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) 313struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84389e3..720d16bce7e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
135 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; 135 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
136 if (fbdev->ywrap_enabled) { 136 if (fbdev->ywrap_enabled) {
137 /* need to align pitch to page size if using DMM scrolling */ 137 /* need to align pitch to page size if using DMM scrolling */
138 mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE); 138 mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
139 } 139 }
140 140
141 /* allocate backing bo */ 141 /* allocate backing bo */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab77801cf5f..7ed08fdc4c42 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@ fail:
808/* Release physical address, when DMA is no longer being performed.. this 808/* Release physical address, when DMA is no longer being performed.. this
809 * could potentially unpin and unmap buffers from TILER 809 * could potentially unpin and unmap buffers from TILER
810 */ 810 */
811int omap_gem_put_paddr(struct drm_gem_object *obj) 811void omap_gem_put_paddr(struct drm_gem_object *obj)
812{ 812{
813 struct omap_gem_object *omap_obj = to_omap_bo(obj); 813 struct omap_gem_object *omap_obj = to_omap_bo(obj);
814 int ret = 0; 814 int ret;
815 815
816 mutex_lock(&obj->dev->struct_mutex); 816 mutex_lock(&obj->dev->struct_mutex);
817 if (omap_obj->paddr_cnt > 0) { 817 if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
821 if (ret) { 821 if (ret) {
822 dev_err(obj->dev->dev, 822 dev_err(obj->dev->dev,
823 "could not unpin pages: %d\n", ret); 823 "could not unpin pages: %d\n", ret);
824 goto fail;
825 } 824 }
826 ret = tiler_release(omap_obj->block); 825 ret = tiler_release(omap_obj->block);
827 if (ret) { 826 if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
832 omap_obj->block = NULL; 831 omap_obj->block = NULL;
833 } 832 }
834 } 833 }
835fail: 834
836 mutex_unlock(&obj->dev->struct_mutex); 835 mutex_unlock(&obj->dev->struct_mutex);
837 return ret;
838} 836}
839 837
840/* Get rotated scanout address (only valid if already pinned), at the 838/* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1378 1376
1379 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL); 1377 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1380 if (!omap_obj) 1378 if (!omap_obj)
1381 goto fail; 1379 return NULL;
1382
1383 spin_lock(&priv->list_lock);
1384 list_add(&omap_obj->mm_list, &priv->obj_list);
1385 spin_unlock(&priv->list_lock);
1386 1380
1387 obj = &omap_obj->base; 1381 obj = &omap_obj->base;
1388 1382
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1392 */ 1386 */
1393 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size, 1387 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
1394 &omap_obj->paddr, GFP_KERNEL); 1388 &omap_obj->paddr, GFP_KERNEL);
1395 if (omap_obj->vaddr) 1389 if (!omap_obj->vaddr) {
1396 flags |= OMAP_BO_DMA; 1390 kfree(omap_obj);
1391
1392 return NULL;
1393 }
1397 1394
1395 flags |= OMAP_BO_DMA;
1398 } 1396 }
1399 1397
1398 spin_lock(&priv->list_lock);
1399 list_add(&omap_obj->mm_list, &priv->obj_list);
1400 spin_unlock(&priv->list_lock);
1401
1400 omap_obj->flags = flags; 1402 omap_obj->flags = flags;
1401 1403
1402 if (flags & OMAP_BO_TILED) { 1404 if (flags & OMAP_BO_TILED) {
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276c4deb..098904696a5c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
17 * this program. If not, see <http://www.gnu.org/licenses/>. 17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <drm/drm_atomic.h>
20#include <drm/drm_atomic_helper.h> 21#include <drm/drm_atomic_helper.h>
21#include <drm/drm_plane_helper.h> 22#include <drm/drm_plane_helper.h>
22 23
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
153 dispc_ovl_enable(omap_plane->id, false); 154 dispc_ovl_enable(omap_plane->id, false);
154} 155}
155 156
157static int omap_plane_atomic_check(struct drm_plane *plane,
158 struct drm_plane_state *state)
159{
160 struct drm_crtc_state *crtc_state;
161
162 if (!state->crtc)
163 return 0;
164
165 crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
166 if (IS_ERR(crtc_state))
167 return PTR_ERR(crtc_state);
168
169 if (state->crtc_x < 0 || state->crtc_y < 0)
170 return -EINVAL;
171
172 if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
173 return -EINVAL;
174
175 if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
176 return -EINVAL;
177
178 return 0;
179}
180
156static const struct drm_plane_helper_funcs omap_plane_helper_funcs = { 181static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
157 .prepare_fb = omap_plane_prepare_fb, 182 .prepare_fb = omap_plane_prepare_fb,
158 .cleanup_fb = omap_plane_cleanup_fb, 183 .cleanup_fb = omap_plane_cleanup_fb,
184 .atomic_check = omap_plane_atomic_check,
159 .atomic_update = omap_plane_atomic_update, 185 .atomic_update = omap_plane_atomic_update,
160 .atomic_disable = omap_plane_atomic_disable, 186 .atomic_disable = omap_plane_atomic_disable,
161}; 187};
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2299 encoder_mode = atombios_get_encoder_mode(encoder); 2299 encoder_mode = atombios_get_encoder_mode(encoder);
2300 if (connector && (radeon_audio != 0) && 2300 if (connector && (radeon_audio != 0) &&
2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || 2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2302 (ENCODER_MODE_IS_DP(encoder_mode) && 2302 ENCODER_MODE_IS_DP(encoder_mode)))
2303 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2304 radeon_audio_mode_set(encoder, adjusted_mode); 2303 radeon_audio_mode_set(encoder, adjusted_mode);
2305} 2304}
2306 2305
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 8730562323a8..4a09947be244 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
5818 tmp |= DPM_ENABLED; 5818 tmp |= DPM_ENABLED;
5819 break; 5819 break;
5820 default: 5820 default:
5821 DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); 5821 DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5822 break; 5822 break;
5823 } 5823 }
5824 WREG32_SMC(CNB_PWRMGT_CNTL, tmp); 5824 WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4ecf5caa8c6d..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7964,23 +7964,27 @@ restart_ih:
7964 case 1: /* D1 vblank/vline */ 7964 case 1: /* D1 vblank/vline */
7965 switch (src_data) { 7965 switch (src_data) {
7966 case 0: /* D1 vblank */ 7966 case 0: /* D1 vblank */
7967 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) { 7967 if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
7968 if (rdev->irq.crtc_vblank_int[0]) { 7968 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7969 drm_handle_vblank(rdev->ddev, 0); 7969
7970 rdev->pm.vblank_sync = true; 7970 if (rdev->irq.crtc_vblank_int[0]) {
7971 wake_up(&rdev->irq.vblank_queue); 7971 drm_handle_vblank(rdev->ddev, 0);
7972 } 7972 rdev->pm.vblank_sync = true;
7973 if (atomic_read(&rdev->irq.pflip[0])) 7973 wake_up(&rdev->irq.vblank_queue);
7974 radeon_crtc_handle_vblank(rdev, 0);
7975 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7976 DRM_DEBUG("IH: D1 vblank\n");
7977 } 7974 }
7975 if (atomic_read(&rdev->irq.pflip[0]))
7976 radeon_crtc_handle_vblank(rdev, 0);
7977 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7978 DRM_DEBUG("IH: D1 vblank\n");
7979
7978 break; 7980 break;
7979 case 1: /* D1 vline */ 7981 case 1: /* D1 vline */
7980 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) { 7982 if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
7981 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT; 7983 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7982 DRM_DEBUG("IH: D1 vline\n"); 7984
7983 } 7985 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
7986 DRM_DEBUG("IH: D1 vline\n");
7987
7984 break; 7988 break;
7985 default: 7989 default:
7986 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 7990 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@ restart_ih:
7990 case 2: /* D2 vblank/vline */ 7994 case 2: /* D2 vblank/vline */
7991 switch (src_data) { 7995 switch (src_data) {
7992 case 0: /* D2 vblank */ 7996 case 0: /* D2 vblank */
7993 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 7997 if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
7994 if (rdev->irq.crtc_vblank_int[1]) { 7998 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7995 drm_handle_vblank(rdev->ddev, 1); 7999
7996 rdev->pm.vblank_sync = true; 8000 if (rdev->irq.crtc_vblank_int[1]) {
7997 wake_up(&rdev->irq.vblank_queue); 8001 drm_handle_vblank(rdev->ddev, 1);
7998 } 8002 rdev->pm.vblank_sync = true;
7999 if (atomic_read(&rdev->irq.pflip[1])) 8003 wake_up(&rdev->irq.vblank_queue);
8000 radeon_crtc_handle_vblank(rdev, 1);
8001 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
8002 DRM_DEBUG("IH: D2 vblank\n");
8003 } 8004 }
8005 if (atomic_read(&rdev->irq.pflip[1]))
8006 radeon_crtc_handle_vblank(rdev, 1);
8007 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
8008 DRM_DEBUG("IH: D2 vblank\n");
8009
8004 break; 8010 break;
8005 case 1: /* D2 vline */ 8011 case 1: /* D2 vline */
8006 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 8012 if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
8007 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 8013 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8008 DRM_DEBUG("IH: D2 vline\n"); 8014
8009 } 8015 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
8016 DRM_DEBUG("IH: D2 vline\n");
8017
8010 break; 8018 break;
8011 default: 8019 default:
8012 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8020 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@ restart_ih:
8016 case 3: /* D3 vblank/vline */ 8024 case 3: /* D3 vblank/vline */
8017 switch (src_data) { 8025 switch (src_data) {
8018 case 0: /* D3 vblank */ 8026 case 0: /* D3 vblank */
8019 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 8027 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
8020 if (rdev->irq.crtc_vblank_int[2]) { 8028 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8021 drm_handle_vblank(rdev->ddev, 2); 8029
8022 rdev->pm.vblank_sync = true; 8030 if (rdev->irq.crtc_vblank_int[2]) {
8023 wake_up(&rdev->irq.vblank_queue); 8031 drm_handle_vblank(rdev->ddev, 2);
8024 } 8032 rdev->pm.vblank_sync = true;
8025 if (atomic_read(&rdev->irq.pflip[2])) 8033 wake_up(&rdev->irq.vblank_queue);
8026 radeon_crtc_handle_vblank(rdev, 2);
8027 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
8028 DRM_DEBUG("IH: D3 vblank\n");
8029 } 8034 }
8035 if (atomic_read(&rdev->irq.pflip[2]))
8036 radeon_crtc_handle_vblank(rdev, 2);
8037 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
8038 DRM_DEBUG("IH: D3 vblank\n");
8039
8030 break; 8040 break;
8031 case 1: /* D3 vline */ 8041 case 1: /* D3 vline */
8032 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 8042 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
8033 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 8043 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8034 DRM_DEBUG("IH: D3 vline\n"); 8044
8035 } 8045 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
8046 DRM_DEBUG("IH: D3 vline\n");
8047
8036 break; 8048 break;
8037 default: 8049 default:
8038 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8050 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@ restart_ih:
8042 case 4: /* D4 vblank/vline */ 8054 case 4: /* D4 vblank/vline */
8043 switch (src_data) { 8055 switch (src_data) {
8044 case 0: /* D4 vblank */ 8056 case 0: /* D4 vblank */
8045 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 8057 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
8046 if (rdev->irq.crtc_vblank_int[3]) { 8058 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8047 drm_handle_vblank(rdev->ddev, 3); 8059
8048 rdev->pm.vblank_sync = true; 8060 if (rdev->irq.crtc_vblank_int[3]) {
8049 wake_up(&rdev->irq.vblank_queue); 8061 drm_handle_vblank(rdev->ddev, 3);
8050 } 8062 rdev->pm.vblank_sync = true;
8051 if (atomic_read(&rdev->irq.pflip[3])) 8063 wake_up(&rdev->irq.vblank_queue);
8052 radeon_crtc_handle_vblank(rdev, 3);
8053 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
8054 DRM_DEBUG("IH: D4 vblank\n");
8055 } 8064 }
8065 if (atomic_read(&rdev->irq.pflip[3]))
8066 radeon_crtc_handle_vblank(rdev, 3);
8067 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
8068 DRM_DEBUG("IH: D4 vblank\n");
8069
8056 break; 8070 break;
8057 case 1: /* D4 vline */ 8071 case 1: /* D4 vline */
8058 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 8072 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
8059 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 8073 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8060 DRM_DEBUG("IH: D4 vline\n"); 8074
8061 } 8075 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
8076 DRM_DEBUG("IH: D4 vline\n");
8077
8062 break; 8078 break;
8063 default: 8079 default:
8064 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8080 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@ restart_ih:
8068 case 5: /* D5 vblank/vline */ 8084 case 5: /* D5 vblank/vline */
8069 switch (src_data) { 8085 switch (src_data) {
8070 case 0: /* D5 vblank */ 8086 case 0: /* D5 vblank */
8071 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 8087 if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
8072 if (rdev->irq.crtc_vblank_int[4]) { 8088 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8073 drm_handle_vblank(rdev->ddev, 4); 8089
8074 rdev->pm.vblank_sync = true; 8090 if (rdev->irq.crtc_vblank_int[4]) {
8075 wake_up(&rdev->irq.vblank_queue); 8091 drm_handle_vblank(rdev->ddev, 4);
8076 } 8092 rdev->pm.vblank_sync = true;
8077 if (atomic_read(&rdev->irq.pflip[4])) 8093 wake_up(&rdev->irq.vblank_queue);
8078 radeon_crtc_handle_vblank(rdev, 4);
8079 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
8080 DRM_DEBUG("IH: D5 vblank\n");
8081 } 8094 }
8095 if (atomic_read(&rdev->irq.pflip[4]))
8096 radeon_crtc_handle_vblank(rdev, 4);
8097 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
8098 DRM_DEBUG("IH: D5 vblank\n");
8099
8082 break; 8100 break;
8083 case 1: /* D5 vline */ 8101 case 1: /* D5 vline */
8084 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 8102 if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
8085 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 8103 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8086 DRM_DEBUG("IH: D5 vline\n"); 8104
8087 } 8105 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
8106 DRM_DEBUG("IH: D5 vline\n");
8107
8088 break; 8108 break;
8089 default: 8109 default:
8090 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8110 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@ restart_ih:
8094 case 6: /* D6 vblank/vline */ 8114 case 6: /* D6 vblank/vline */
8095 switch (src_data) { 8115 switch (src_data) {
8096 case 0: /* D6 vblank */ 8116 case 0: /* D6 vblank */
8097 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 8117 if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
8098 if (rdev->irq.crtc_vblank_int[5]) { 8118 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8099 drm_handle_vblank(rdev->ddev, 5); 8119
8100 rdev->pm.vblank_sync = true; 8120 if (rdev->irq.crtc_vblank_int[5]) {
8101 wake_up(&rdev->irq.vblank_queue); 8121 drm_handle_vblank(rdev->ddev, 5);
8102 } 8122 rdev->pm.vblank_sync = true;
8103 if (atomic_read(&rdev->irq.pflip[5])) 8123 wake_up(&rdev->irq.vblank_queue);
8104 radeon_crtc_handle_vblank(rdev, 5);
8105 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
8106 DRM_DEBUG("IH: D6 vblank\n");
8107 } 8124 }
8125 if (atomic_read(&rdev->irq.pflip[5]))
8126 radeon_crtc_handle_vblank(rdev, 5);
8127 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
8128 DRM_DEBUG("IH: D6 vblank\n");
8129
8108 break; 8130 break;
8109 case 1: /* D6 vline */ 8131 case 1: /* D6 vline */
8110 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 8132 if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
8111 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 8133 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8112 DRM_DEBUG("IH: D6 vline\n"); 8134
8113 } 8135 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
8136 DRM_DEBUG("IH: D6 vline\n");
8137
8114 break; 8138 break;
8115 default: 8139 default:
8116 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8140 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@ restart_ih:
8130 case 42: /* HPD hotplug */ 8154 case 42: /* HPD hotplug */
8131 switch (src_data) { 8155 switch (src_data) {
8132 case 0: 8156 case 0:
8133 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) { 8157 if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
8134 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT; 8158 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8135 queue_hotplug = true; 8159
8136 DRM_DEBUG("IH: HPD1\n"); 8160 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
8137 } 8161 queue_hotplug = true;
8162 DRM_DEBUG("IH: HPD1\n");
8163
8138 break; 8164 break;
8139 case 1: 8165 case 1:
8140 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) { 8166 if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
8141 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT; 8167 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8142 queue_hotplug = true; 8168
8143 DRM_DEBUG("IH: HPD2\n"); 8169 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
8144 } 8170 queue_hotplug = true;
8171 DRM_DEBUG("IH: HPD2\n");
8172
8145 break; 8173 break;
8146 case 2: 8174 case 2:
8147 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) { 8175 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
8148 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 8176 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8149 queue_hotplug = true; 8177
8150 DRM_DEBUG("IH: HPD3\n"); 8178 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
8151 } 8179 queue_hotplug = true;
8180 DRM_DEBUG("IH: HPD3\n");
8181
8152 break; 8182 break;
8153 case 3: 8183 case 3:
8154 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) { 8184 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
8155 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 8185 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8156 queue_hotplug = true; 8186
8157 DRM_DEBUG("IH: HPD4\n"); 8187 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
8158 } 8188 queue_hotplug = true;
8189 DRM_DEBUG("IH: HPD4\n");
8190
8159 break; 8191 break;
8160 case 4: 8192 case 4:
8161 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) { 8193 if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
8162 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 8194 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8163 queue_hotplug = true; 8195
8164 DRM_DEBUG("IH: HPD5\n"); 8196 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
8165 } 8197 queue_hotplug = true;
8198 DRM_DEBUG("IH: HPD5\n");
8199
8166 break; 8200 break;
8167 case 5: 8201 case 5:
8168 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { 8202 if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
8169 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 8203 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8170 queue_hotplug = true; 8204
8171 DRM_DEBUG("IH: HPD6\n"); 8205 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
8172 } 8206 queue_hotplug = true;
8207 DRM_DEBUG("IH: HPD6\n");
8208
8173 break; 8209 break;
8174 case 6: 8210 case 6:
8175 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) { 8211 if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
8176 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT; 8212 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8177 queue_dp = true; 8213
8178 DRM_DEBUG("IH: HPD_RX 1\n"); 8214 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
8179 } 8215 queue_dp = true;
8216 DRM_DEBUG("IH: HPD_RX 1\n");
8217
8180 break; 8218 break;
8181 case 7: 8219 case 7:
8182 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) { 8220 if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
8183 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; 8221 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8184 queue_dp = true; 8222
8185 DRM_DEBUG("IH: HPD_RX 2\n"); 8223 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
8186 } 8224 queue_dp = true;
8225 DRM_DEBUG("IH: HPD_RX 2\n");
8226
8187 break; 8227 break;
8188 case 8: 8228 case 8:
8189 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { 8229 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
8190 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; 8230 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8191 queue_dp = true; 8231
8192 DRM_DEBUG("IH: HPD_RX 3\n"); 8232 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
8193 } 8233 queue_dp = true;
8234 DRM_DEBUG("IH: HPD_RX 3\n");
8235
8194 break; 8236 break;
8195 case 9: 8237 case 9:
8196 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { 8238 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
8197 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; 8239 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8198 queue_dp = true; 8240
8199 DRM_DEBUG("IH: HPD_RX 4\n"); 8241 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
8200 } 8242 queue_dp = true;
8243 DRM_DEBUG("IH: HPD_RX 4\n");
8244
8201 break; 8245 break;
8202 case 10: 8246 case 10:
8203 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { 8247 if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
8204 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; 8248 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8205 queue_dp = true; 8249
8206 DRM_DEBUG("IH: HPD_RX 5\n"); 8250 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
8207 } 8251 queue_dp = true;
8252 DRM_DEBUG("IH: HPD_RX 5\n");
8253
8208 break; 8254 break;
8209 case 11: 8255 case 11:
8210 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 8256 if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
8211 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; 8257 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8212 queue_dp = true; 8258
8213 DRM_DEBUG("IH: HPD_RX 6\n"); 8259 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
8214 } 8260 queue_dp = true;
8261 DRM_DEBUG("IH: HPD_RX 6\n");
8262
8215 break; 8263 break;
8216 default: 8264 default:
8217 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8265 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
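The cik.c hunks above all make the same change to the per-source interrupt handling in the IH loop: instead of wrapping the work in a check that the matching status bit is set, the handler now only emits a debug message when the expected bit is missing and then acknowledges and processes the event unconditionally. A minimal userspace sketch of that shape, using a made-up status word and bit name in place of the real disp_int* registers and LB_/DC_ masks, and a printf stand-in for DRM_DEBUG:

#include <stdint.h>
#include <stdio.h>

/* Placeholder names: EXAMPLE_EVENT_BIT and "status" stand in for the radeon
 * disp_int* registers and their LB_/DC_ interrupt masks; dbg() stands in for
 * DRM_DEBUG. */
#define EXAMPLE_EVENT_BIT (1u << 3)
#define dbg(...) fprintf(stderr, __VA_ARGS__)

static void handle_example_event(uint32_t *status)
{
	/* New shape: a stray IH entry only produces a debug line ... */
	if (!(*status & EXAMPLE_EVENT_BIT))
		dbg("IH: IH event w/o asserted irq bit?\n");

	/* ... and the event is still acknowledged and handled, where the old
	 * code skipped everything unless the bit was set. */
	*status &= ~EXAMPLE_EVENT_BIT;
	dbg("IH: example event\n");
}

int main(void)
{
	uint32_t status = EXAMPLE_EVENT_BIT;

	handle_example_event(&status);	/* normal case: bit set, handled and cleared */
	handle_example_event(&status);	/* bit already clear: only the debug warning */
	return 0;
}
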
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset;
97 96
98 if (!dig || !dig->afmt || !dig->afmt->pin) 97 if (!dig || !dig->afmt || !dig->pin)
99 return; 98 return;
100 99
101 offset = dig->afmt->offset; 100 WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
102 101 AFMT_AUDIO_SRC_SELECT(dig->pin->id));
103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
105} 102}
106 103
107void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, 104void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
108 struct drm_connector *connector, struct drm_display_mode *mode) 105 struct drm_connector *connector,
106 struct drm_display_mode *mode)
109{ 107{
110 struct radeon_device *rdev = encoder->dev->dev_private; 108 struct radeon_device *rdev = encoder->dev->dev_private;
111 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 109 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
112 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 110 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
113 u32 tmp = 0, offset; 111 u32 tmp = 0;
114 112
115 if (!dig || !dig->afmt || !dig->afmt->pin) 113 if (!dig || !dig->afmt || !dig->pin)
116 return; 114 return;
117 115
118 offset = dig->afmt->pin->offset;
119
120 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 116 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
121 if (connector->latency_present[1]) 117 if (connector->latency_present[1])
122 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | 118 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
130 else 126 else
131 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); 127 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
132 } 128 }
133 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 129 WREG32_ENDPOINT(dig->pin->offset,
130 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
134} 131}
135 132
136void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, 133void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
137 u8 *sadb, int sad_count) 134 u8 *sadb, int sad_count)
138{ 135{
139 struct radeon_device *rdev = encoder->dev->dev_private; 136 struct radeon_device *rdev = encoder->dev->dev_private;
140 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 137 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
141 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 138 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
142 u32 offset, tmp; 139 u32 tmp;
143 140
144 if (!dig || !dig->afmt || !dig->afmt->pin) 141 if (!dig || !dig->afmt || !dig->pin)
145 return; 142 return;
146 143
147 offset = dig->afmt->pin->offset;
148
149 /* program the speaker allocation */ 144 /* program the speaker allocation */
150 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 145 tmp = RREG32_ENDPOINT(dig->pin->offset,
146 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
151 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); 147 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
152 /* set HDMI mode */ 148 /* set HDMI mode */
153 tmp |= HDMI_CONNECTION; 149 tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
155 tmp |= SPEAKER_ALLOCATION(sadb[0]); 151 tmp |= SPEAKER_ALLOCATION(sadb[0]);
156 else 152 else
157 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 153 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
158 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 154 WREG32_ENDPOINT(dig->pin->offset,
155 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
159} 156}
160 157
161void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, 158void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
162 u8 *sadb, int sad_count) 159 u8 *sadb, int sad_count)
163{ 160{
164 struct radeon_device *rdev = encoder->dev->dev_private; 161 struct radeon_device *rdev = encoder->dev->dev_private;
165 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 162 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
166 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 163 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
167 u32 offset, tmp; 164 u32 tmp;
168 165
169 if (!dig || !dig->afmt || !dig->afmt->pin) 166 if (!dig || !dig->afmt || !dig->pin)
170 return; 167 return;
171 168
172 offset = dig->afmt->pin->offset;
173
174 /* program the speaker allocation */ 169 /* program the speaker allocation */
175 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 170 tmp = RREG32_ENDPOINT(dig->pin->offset,
171 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
176 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); 172 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
177 /* set DP mode */ 173 /* set DP mode */
178 tmp |= DP_CONNECTION; 174 tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
180 tmp |= SPEAKER_ALLOCATION(sadb[0]); 176 tmp |= SPEAKER_ALLOCATION(sadb[0]);
181 else 177 else
182 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 178 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
183 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 179 WREG32_ENDPOINT(dig->pin->offset,
180 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
184} 181}
185 182
186void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, 183void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
187 struct cea_sad *sads, int sad_count) 184 struct cea_sad *sads, int sad_count)
188{ 185{
189 u32 offset;
190 int i; 186 int i;
191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 187 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 188 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
206 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 202 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
207 }; 203 };
208 204
209 if (!dig || !dig->afmt || !dig->afmt->pin) 205 if (!dig || !dig->afmt || !dig->pin)
210 return; 206 return;
211 207
212 offset = dig->afmt->pin->offset;
213
214 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 208 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
215 u32 value = 0; 209 u32 value = 0;
216 u8 stereo_freqs = 0; 210 u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
237 231
238 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); 232 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
239 233
240 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); 234 WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
241 } 235 }
242} 236}
243 237
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
253} 247}
254 248
255void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, 249void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
256 struct radeon_crtc *crtc, unsigned int clock) 250 struct radeon_crtc *crtc, unsigned int clock)
257{ 251{
258 /* Two dtos; generally use dto0 for HDMI */ 252 /* Two dtos; generally use dto0 for HDMI */
259 u32 value = 0; 253 u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
272} 266}
273 267
274void dce6_dp_audio_set_dto(struct radeon_device *rdev, 268void dce6_dp_audio_set_dto(struct radeon_device *rdev,
275 struct radeon_crtc *crtc, unsigned int clock) 269 struct radeon_crtc *crtc, unsigned int clock)
276{ 270{
277 /* Two dtos; generally use dto1 for DP */ 271 /* Two dtos; generally use dto1 for DP */
278 u32 value = 0; 272 u32 value = 0;
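The dce6_afmt.c hunks above follow from the audio pin pointer moving off the AFMT sub-struct: the guards change from dig->afmt->pin to dig->pin, the cached offset locals go away, and the endpoint registers are addressed through dig->pin->offset (or dig->afmt->offset for the AFMT block) at the point of use. Roughly, with abbreviated stand-in structs rather than the real radeon types:

#include <stdint.h>
#include <stdio.h>

/* Abbreviated stand-ins for the radeon encoder state; only the fields used here. */
struct audio_pin { uint32_t offset; uint32_t id; };
struct afmt      { uint32_t offset; };
struct dig_state {
	struct afmt      *afmt;
	struct audio_pin *pin;	/* previously reached as afmt->pin */
};

static void write_endpoint(uint32_t block, uint32_t reg, uint32_t val)
{
	printf("endpoint write: block 0x%x reg 0x%x val 0x%x\n", block, reg, val);
}

static void program_lipsync(struct dig_state *dig, uint32_t reg, uint32_t val)
{
	if (!dig || !dig->afmt || !dig->pin)	/* new guard: dig->pin */
		return;

	/* no "offset" local cached up front any more */
	write_endpoint(dig->pin->offset, reg, val);
}
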
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
4924 return IRQ_NONE; 4924 return IRQ_NONE;
4925 4925
4926 rptr = rdev->ih.rptr; 4926 rptr = rdev->ih.rptr;
4927 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 4927 DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
4928 4928
4929 /* Order reading of wptr vs. reading of IH ring data */ 4929 /* Order reading of wptr vs. reading of IH ring data */
4930 rmb(); 4930 rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
4942 case 1: /* D1 vblank/vline */ 4942 case 1: /* D1 vblank/vline */
4943 switch (src_data) { 4943 switch (src_data) {
4944 case 0: /* D1 vblank */ 4944 case 0: /* D1 vblank */
4945 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { 4945 if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
4946 if (rdev->irq.crtc_vblank_int[0]) { 4946 DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
4947 drm_handle_vblank(rdev->ddev, 0); 4947
4948 rdev->pm.vblank_sync = true; 4948 if (rdev->irq.crtc_vblank_int[0]) {
4949 wake_up(&rdev->irq.vblank_queue); 4949 drm_handle_vblank(rdev->ddev, 0);
4950 } 4950 rdev->pm.vblank_sync = true;
4951 if (atomic_read(&rdev->irq.pflip[0])) 4951 wake_up(&rdev->irq.vblank_queue);
4952 radeon_crtc_handle_vblank(rdev, 0);
4953 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4954 DRM_DEBUG("IH: D1 vblank\n");
4955 } 4952 }
4953 if (atomic_read(&rdev->irq.pflip[0]))
4954 radeon_crtc_handle_vblank(rdev, 0);
4955 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4956 DRM_DEBUG("IH: D1 vblank\n");
4957
4956 break; 4958 break;
4957 case 1: /* D1 vline */ 4959 case 1: /* D1 vline */
4958 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { 4960 if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
4959 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; 4961 DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
4960 DRM_DEBUG("IH: D1 vline\n"); 4962
4961 } 4963 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4964 DRM_DEBUG("IH: D1 vline\n");
4965
4962 break; 4966 break;
4963 default: 4967 default:
4964 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4968 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
4968 case 2: /* D2 vblank/vline */ 4972 case 2: /* D2 vblank/vline */
4969 switch (src_data) { 4973 switch (src_data) {
4970 case 0: /* D2 vblank */ 4974 case 0: /* D2 vblank */
4971 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 4975 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
4972 if (rdev->irq.crtc_vblank_int[1]) { 4976 DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
4973 drm_handle_vblank(rdev->ddev, 1); 4977
4974 rdev->pm.vblank_sync = true; 4978 if (rdev->irq.crtc_vblank_int[1]) {
4975 wake_up(&rdev->irq.vblank_queue); 4979 drm_handle_vblank(rdev->ddev, 1);
4976 } 4980 rdev->pm.vblank_sync = true;
4977 if (atomic_read(&rdev->irq.pflip[1])) 4981 wake_up(&rdev->irq.vblank_queue);
4978 radeon_crtc_handle_vblank(rdev, 1);
4979 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
4980 DRM_DEBUG("IH: D2 vblank\n");
4981 } 4982 }
4983 if (atomic_read(&rdev->irq.pflip[1]))
4984 radeon_crtc_handle_vblank(rdev, 1);
4985 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
4986 DRM_DEBUG("IH: D2 vblank\n");
4987
4982 break; 4988 break;
4983 case 1: /* D2 vline */ 4989 case 1: /* D2 vline */
4984 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 4990 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
4985 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 4991 DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
4986 DRM_DEBUG("IH: D2 vline\n"); 4992
4987 } 4993 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
4994 DRM_DEBUG("IH: D2 vline\n");
4995
4988 break; 4996 break;
4989 default: 4997 default:
4990 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4998 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
4994 case 3: /* D3 vblank/vline */ 5002 case 3: /* D3 vblank/vline */
4995 switch (src_data) { 5003 switch (src_data) {
4996 case 0: /* D3 vblank */ 5004 case 0: /* D3 vblank */
4997 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 5005 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
4998 if (rdev->irq.crtc_vblank_int[2]) { 5006 DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
4999 drm_handle_vblank(rdev->ddev, 2); 5007
5000 rdev->pm.vblank_sync = true; 5008 if (rdev->irq.crtc_vblank_int[2]) {
5001 wake_up(&rdev->irq.vblank_queue); 5009 drm_handle_vblank(rdev->ddev, 2);
5002 } 5010 rdev->pm.vblank_sync = true;
5003 if (atomic_read(&rdev->irq.pflip[2])) 5011 wake_up(&rdev->irq.vblank_queue);
5004 radeon_crtc_handle_vblank(rdev, 2);
5005 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
5006 DRM_DEBUG("IH: D3 vblank\n");
5007 } 5012 }
5013 if (atomic_read(&rdev->irq.pflip[2]))
5014 radeon_crtc_handle_vblank(rdev, 2);
5015 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
5016 DRM_DEBUG("IH: D3 vblank\n");
5017
5008 break; 5018 break;
5009 case 1: /* D3 vline */ 5019 case 1: /* D3 vline */
5010 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 5020 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
5011 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 5021 DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
5012 DRM_DEBUG("IH: D3 vline\n"); 5022
5013 } 5023 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
5024 DRM_DEBUG("IH: D3 vline\n");
5025
5014 break; 5026 break;
5015 default: 5027 default:
5016 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 5028 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
5020 case 4: /* D4 vblank/vline */ 5032 case 4: /* D4 vblank/vline */
5021 switch (src_data) { 5033 switch (src_data) {
5022 case 0: /* D4 vblank */ 5034 case 0: /* D4 vblank */
5023 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 5035 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
5024 if (rdev->irq.crtc_vblank_int[3]) { 5036 DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
5025 drm_handle_vblank(rdev->ddev, 3); 5037
5026 rdev->pm.vblank_sync = true; 5038 if (rdev->irq.crtc_vblank_int[3]) {
5027 wake_up(&rdev->irq.vblank_queue); 5039 drm_handle_vblank(rdev->ddev, 3);
5028 } 5040 rdev->pm.vblank_sync = true;
5029 if (atomic_read(&rdev->irq.pflip[3])) 5041 wake_up(&rdev->irq.vblank_queue);
5030 radeon_crtc_handle_vblank(rdev, 3);
5031 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
5032 DRM_DEBUG("IH: D4 vblank\n");
5033 } 5042 }
5043 if (atomic_read(&rdev->irq.pflip[3]))
5044 radeon_crtc_handle_vblank(rdev, 3);
5045 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
5046 DRM_DEBUG("IH: D4 vblank\n");
5047
5034 break; 5048 break;
5035 case 1: /* D4 vline */ 5049 case 1: /* D4 vline */
5036 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 5050 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
5037 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 5051 DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
5038 DRM_DEBUG("IH: D4 vline\n"); 5052
5039 } 5053 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
5054 DRM_DEBUG("IH: D4 vline\n");
5055
5040 break; 5056 break;
5041 default: 5057 default:
5042 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 5058 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
5046 case 5: /* D5 vblank/vline */ 5062 case 5: /* D5 vblank/vline */
5047 switch (src_data) { 5063 switch (src_data) {
5048 case 0: /* D5 vblank */ 5064 case 0: /* D5 vblank */
5049 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 5065 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
5050 if (rdev->irq.crtc_vblank_int[4]) { 5066 DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
5051 drm_handle_vblank(rdev->ddev, 4); 5067
5052 rdev->pm.vblank_sync = true; 5068 if (rdev->irq.crtc_vblank_int[4]) {
5053 wake_up(&rdev->irq.vblank_queue); 5069 drm_handle_vblank(rdev->ddev, 4);
5054 } 5070 rdev->pm.vblank_sync = true;
5055 if (atomic_read(&rdev->irq.pflip[4])) 5071 wake_up(&rdev->irq.vblank_queue);
5056 radeon_crtc_handle_vblank(rdev, 4);
5057 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
5058 DRM_DEBUG("IH: D5 vblank\n");
5059 } 5072 }
5073 if (atomic_read(&rdev->irq.pflip[4]))
5074 radeon_crtc_handle_vblank(rdev, 4);
5075 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
5076 DRM_DEBUG("IH: D5 vblank\n");
5077
5060 break; 5078 break;
5061 case 1: /* D5 vline */ 5079 case 1: /* D5 vline */
5062 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 5080 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
5063 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 5081 DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
5064 DRM_DEBUG("IH: D5 vline\n"); 5082
5065 } 5083 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
5084 DRM_DEBUG("IH: D5 vline\n");
5085
5066 break; 5086 break;
5067 default: 5087 default:
5068 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 5088 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
5072 case 6: /* D6 vblank/vline */ 5092 case 6: /* D6 vblank/vline */
5073 switch (src_data) { 5093 switch (src_data) {
5074 case 0: /* D6 vblank */ 5094 case 0: /* D6 vblank */
5075 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 5095 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
5076 if (rdev->irq.crtc_vblank_int[5]) { 5096 DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
5077 drm_handle_vblank(rdev->ddev, 5); 5097
5078 rdev->pm.vblank_sync = true; 5098 if (rdev->irq.crtc_vblank_int[5]) {
5079 wake_up(&rdev->irq.vblank_queue); 5099 drm_handle_vblank(rdev->ddev, 5);
5080 } 5100 rdev->pm.vblank_sync = true;
5081 if (atomic_read(&rdev->irq.pflip[5])) 5101 wake_up(&rdev->irq.vblank_queue);
5082 radeon_crtc_handle_vblank(rdev, 5);
5083 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
5084 DRM_DEBUG("IH: D6 vblank\n");
5085 } 5102 }
5103 if (atomic_read(&rdev->irq.pflip[5]))
5104 radeon_crtc_handle_vblank(rdev, 5);
5105 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
5106 DRM_DEBUG("IH: D6 vblank\n");
5107
5086 break; 5108 break;
5087 case 1: /* D6 vline */ 5109 case 1: /* D6 vline */
5088 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 5110 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
5089 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 5111 DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
5090 DRM_DEBUG("IH: D6 vline\n"); 5112
5091 } 5113 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
5114 DRM_DEBUG("IH: D6 vline\n");
5115
5092 break; 5116 break;
5093 default: 5117 default:
5094 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 5118 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
5108 case 42: /* HPD hotplug */ 5132 case 42: /* HPD hotplug */
5109 switch (src_data) { 5133 switch (src_data) {
5110 case 0: 5134 case 0:
5111 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 5135 if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
5112 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; 5136 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5113 queue_hotplug = true; 5137
5114 DRM_DEBUG("IH: HPD1\n"); 5138 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
5115 } 5139 queue_hotplug = true;
5140 DRM_DEBUG("IH: HPD1\n");
5116 break; 5141 break;
5117 case 1: 5142 case 1:
5118 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 5143 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
5119 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; 5144 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5120 queue_hotplug = true; 5145
5121 DRM_DEBUG("IH: HPD2\n"); 5146 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
5122 } 5147 queue_hotplug = true;
5148 DRM_DEBUG("IH: HPD2\n");
5123 break; 5149 break;
5124 case 2: 5150 case 2:
5125 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 5151 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
5126 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 5152 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5127 queue_hotplug = true; 5153
5128 DRM_DEBUG("IH: HPD3\n"); 5154 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
5129 } 5155 queue_hotplug = true;
5156 DRM_DEBUG("IH: HPD3\n");
5130 break; 5157 break;
5131 case 3: 5158 case 3:
5132 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 5159 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
5133 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 5160 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5134 queue_hotplug = true; 5161
5135 DRM_DEBUG("IH: HPD4\n"); 5162 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
5136 } 5163 queue_hotplug = true;
5164 DRM_DEBUG("IH: HPD4\n");
5137 break; 5165 break;
5138 case 4: 5166 case 4:
5139 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 5167 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
5140 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 5168 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5141 queue_hotplug = true; 5169
5142 DRM_DEBUG("IH: HPD5\n"); 5170 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
5143 } 5171 queue_hotplug = true;
5172 DRM_DEBUG("IH: HPD5\n");
5144 break; 5173 break;
5145 case 5: 5174 case 5:
5146 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 5175 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
5147 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 5176 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5148 queue_hotplug = true; 5177
5149 DRM_DEBUG("IH: HPD6\n"); 5178 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
5150 } 5179 queue_hotplug = true;
5180 DRM_DEBUG("IH: HPD6\n");
5151 break; 5181 break;
5152 case 6: 5182 case 6:
5153 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { 5183 if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
5154 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; 5184 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5155 queue_dp = true; 5185
5156 DRM_DEBUG("IH: HPD_RX 1\n"); 5186 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
5157 } 5187 queue_dp = true;
5188 DRM_DEBUG("IH: HPD_RX 1\n");
5158 break; 5189 break;
5159 case 7: 5190 case 7:
5160 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { 5191 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
5161 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; 5192 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5162 queue_dp = true; 5193
5163 DRM_DEBUG("IH: HPD_RX 2\n"); 5194 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
5164 } 5195 queue_dp = true;
5196 DRM_DEBUG("IH: HPD_RX 2\n");
5165 break; 5197 break;
5166 case 8: 5198 case 8:
5167 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { 5199 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
5168 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; 5200 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5169 queue_dp = true; 5201
5170 DRM_DEBUG("IH: HPD_RX 3\n"); 5202 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
5171 } 5203 queue_dp = true;
5204 DRM_DEBUG("IH: HPD_RX 3\n");
5172 break; 5205 break;
5173 case 9: 5206 case 9:
5174 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { 5207 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
5175 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; 5208 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5176 queue_dp = true; 5209
5177 DRM_DEBUG("IH: HPD_RX 4\n"); 5210 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
5178 } 5211 queue_dp = true;
5212 DRM_DEBUG("IH: HPD_RX 4\n");
5179 break; 5213 break;
5180 case 10: 5214 case 10:
5181 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { 5215 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
5182 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; 5216 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5183 queue_dp = true; 5217
5184 DRM_DEBUG("IH: HPD_RX 5\n"); 5218 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
5185 } 5219 queue_dp = true;
5220 DRM_DEBUG("IH: HPD_RX 5\n");
5186 break; 5221 break;
5187 case 11: 5222 case 11:
5188 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 5223 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
5189 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; 5224 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5190 queue_dp = true; 5225
5191 DRM_DEBUG("IH: HPD_RX 6\n"); 5226 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
5192 } 5227 queue_dp = true;
5228 DRM_DEBUG("IH: HPD_RX 6\n");
5193 break; 5229 break;
5194 default: 5230 default:
5195 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 5231 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
5199 case 44: /* hdmi */ 5235 case 44: /* hdmi */
5200 switch (src_data) { 5236 switch (src_data) {
5201 case 0: 5237 case 0:
5202 if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { 5238 if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
5203 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG; 5239 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5204 queue_hdmi = true; 5240
5205 DRM_DEBUG("IH: HDMI0\n"); 5241 rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
5206 } 5242 queue_hdmi = true;
5243 DRM_DEBUG("IH: HDMI0\n");
5207 break; 5244 break;
5208 case 1: 5245 case 1:
5209 if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { 5246 if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
5210 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG; 5247 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5211 queue_hdmi = true; 5248
5212 DRM_DEBUG("IH: HDMI1\n"); 5249 rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
5213 } 5250 queue_hdmi = true;
5251 DRM_DEBUG("IH: HDMI1\n");
5214 break; 5252 break;
5215 case 2: 5253 case 2:
5216 if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { 5254 if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
5217 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG; 5255 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5218 queue_hdmi = true; 5256
5219 DRM_DEBUG("IH: HDMI2\n"); 5257 rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
5220 } 5258 queue_hdmi = true;
5259 DRM_DEBUG("IH: HDMI2\n");
5221 break; 5260 break;
5222 case 3: 5261 case 3:
5223 if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { 5262 if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
5224 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG; 5263 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5225 queue_hdmi = true; 5264
5226 DRM_DEBUG("IH: HDMI3\n"); 5265 rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
5227 } 5266 queue_hdmi = true;
5267 DRM_DEBUG("IH: HDMI3\n");
5228 break; 5268 break;
5229 case 4: 5269 case 4:
5230 if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { 5270 if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
5231 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG; 5271 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5232 queue_hdmi = true; 5272
5233 DRM_DEBUG("IH: HDMI4\n"); 5273 rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
5234 } 5274 queue_hdmi = true;
5275 DRM_DEBUG("IH: HDMI4\n");
5235 break; 5276 break;
5236 case 5: 5277 case 5:
5237 if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { 5278 if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
5238 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG; 5279 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
5239 queue_hdmi = true; 5280
5240 DRM_DEBUG("IH: HDMI5\n"); 5281 rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
5241 } 5282 queue_hdmi = true;
5283 DRM_DEBUG("IH: HDMI5\n");
5242 break; 5284 break;
5243 default: 5285 default:
5244 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 5286 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
2162 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 2162 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
2163 } 2163 }
2164 2164
2165 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; 2165 if (rdev->family == CHIP_ARUBA) {
2166 if (ring->ring_size) 2166 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
2167 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); 2167 if (ring->ring_size)
2168 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2168 2169
2169 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; 2170 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
2170 if (ring->ring_size) 2171 if (ring->ring_size)
2171 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); 2172 r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2172 2173
2173 if (!r) 2174 if (!r)
2174 r = vce_v1_0_init(rdev); 2175 r = vce_v1_0_init(rdev);
2175 else if (r != -ENOENT) 2176 if (r)
2176 DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); 2177 DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
2178 }
2177 2179
2178 r = radeon_ib_pool_init(rdev); 2180 r = radeon_ib_pool_init(rdev);
2179 if (r) { 2181 if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
2396 radeon_irq_kms_fini(rdev); 2398 radeon_irq_kms_fini(rdev);
2397 uvd_v1_0_fini(rdev); 2399 uvd_v1_0_fini(rdev);
2398 radeon_uvd_fini(rdev); 2400 radeon_uvd_fini(rdev);
2399 radeon_vce_fini(rdev); 2401 if (rdev->family == CHIP_ARUBA)
2402 radeon_vce_fini(rdev);
2400 cayman_pcie_gart_fini(rdev); 2403 cayman_pcie_gart_fini(rdev);
2401 r600_vram_scratch_fini(rdev); 2404 r600_vram_scratch_fini(rdev);
2402 radeon_gem_fini(rdev); 2405 radeon_gem_fini(rdev);
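The ni.c hunk above confines VCE setup and teardown to CHIP_ARUBA: on other Cayman-class parts the VCE1/VCE2 ring init and vce_v1_0_init() are skipped entirely, and cayman_fini() only calls radeon_vce_fini() for ARUBA. A stripped-down sketch of the startup control flow, with placeholder stubs instead of the real radeon_ring_init()/vce_v1_0_init() calls:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder stubs for radeon_ring_init() and vce_v1_0_init(). */
static int ring_init(const char *name) { printf("init %s ring\n", name); return 0; }
static int vce_init(void)              { printf("vce_v1_0_init\n");     return 0; }

static void startup_vce(bool chip_is_aruba)
{
	int r = 0;

	/* Everything VCE-related is now fenced off to ARUBA. */
	if (!chip_is_aruba)
		return;

	r = ring_init("VCE1");
	if (!r)
		r = ring_init("VCE2");
	if (!r)
		r = vce_init();
	if (r)
		fprintf(stderr, "radeon: failed initializing VCE (%d).\n", r);
}

int main(void)
{
	startup_vce(false);	/* e.g. plain Cayman: nothing happens */
	startup_vce(true);	/* ARUBA: rings and VCE are brought up */
	return 0;
}
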
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
4086 case 1: /* D1 vblank/vline */ 4086 case 1: /* D1 vblank/vline */
4087 switch (src_data) { 4087 switch (src_data) {
4088 case 0: /* D1 vblank */ 4088 case 0: /* D1 vblank */
4089 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { 4089 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
4090 if (rdev->irq.crtc_vblank_int[0]) { 4090 DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
4091 drm_handle_vblank(rdev->ddev, 0); 4091
4092 rdev->pm.vblank_sync = true; 4092 if (rdev->irq.crtc_vblank_int[0]) {
4093 wake_up(&rdev->irq.vblank_queue); 4093 drm_handle_vblank(rdev->ddev, 0);
4094 } 4094 rdev->pm.vblank_sync = true;
4095 if (atomic_read(&rdev->irq.pflip[0])) 4095 wake_up(&rdev->irq.vblank_queue);
4096 radeon_crtc_handle_vblank(rdev, 0);
4097 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4098 DRM_DEBUG("IH: D1 vblank\n");
4099 } 4096 }
4097 if (atomic_read(&rdev->irq.pflip[0]))
4098 radeon_crtc_handle_vblank(rdev, 0);
4099 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4100 DRM_DEBUG("IH: D1 vblank\n");
4101
4100 break; 4102 break;
4101 case 1: /* D1 vline */ 4103 case 1: /* D1 vline */
4102 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { 4104 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
4103 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; 4105 DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
4104 DRM_DEBUG("IH: D1 vline\n"); 4106
4105 } 4107 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4108 DRM_DEBUG("IH: D1 vline\n");
4109
4106 break; 4110 break;
4107 default: 4111 default:
4108 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4112 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
4112 case 5: /* D2 vblank/vline */ 4116 case 5: /* D2 vblank/vline */
4113 switch (src_data) { 4117 switch (src_data) {
4114 case 0: /* D2 vblank */ 4118 case 0: /* D2 vblank */
4115 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { 4119 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
4116 if (rdev->irq.crtc_vblank_int[1]) { 4120 DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
4117 drm_handle_vblank(rdev->ddev, 1); 4121
4118 rdev->pm.vblank_sync = true; 4122 if (rdev->irq.crtc_vblank_int[1]) {
4119 wake_up(&rdev->irq.vblank_queue); 4123 drm_handle_vblank(rdev->ddev, 1);
4120 } 4124 rdev->pm.vblank_sync = true;
4121 if (atomic_read(&rdev->irq.pflip[1])) 4125 wake_up(&rdev->irq.vblank_queue);
4122 radeon_crtc_handle_vblank(rdev, 1);
4123 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4124 DRM_DEBUG("IH: D2 vblank\n");
4125 } 4126 }
4127 if (atomic_read(&rdev->irq.pflip[1]))
4128 radeon_crtc_handle_vblank(rdev, 1);
4129 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4130 DRM_DEBUG("IH: D2 vblank\n");
4131
4126 break; 4132 break;
4127 case 1: /* D1 vline */ 4133 case 1: /* D1 vline */
4128 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { 4134 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
4129 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; 4135 DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
4130 DRM_DEBUG("IH: D2 vline\n"); 4136
4131 } 4137 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4138 DRM_DEBUG("IH: D2 vline\n");
4139
4132 break; 4140 break;
4133 default: 4141 default:
4134 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4142 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
4148 case 19: /* HPD/DAC hotplug */ 4156 case 19: /* HPD/DAC hotplug */
4149 switch (src_data) { 4157 switch (src_data) {
4150 case 0: 4158 case 0:
4151 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { 4159 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
4152 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; 4160 DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
4153 queue_hotplug = true; 4161
4154 DRM_DEBUG("IH: HPD1\n"); 4162 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4155 } 4163 queue_hotplug = true;
4164 DRM_DEBUG("IH: HPD1\n");
4156 break; 4165 break;
4157 case 1: 4166 case 1:
4158 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { 4167 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
4159 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; 4168 DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
4160 queue_hotplug = true; 4169
4161 DRM_DEBUG("IH: HPD2\n"); 4170 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4162 } 4171 queue_hotplug = true;
4172 DRM_DEBUG("IH: HPD2\n");
4163 break; 4173 break;
4164 case 4: 4174 case 4:
4165 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { 4175 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
4166 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; 4176 DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
4167 queue_hotplug = true; 4177
4168 DRM_DEBUG("IH: HPD3\n"); 4178 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4169 } 4179 queue_hotplug = true;
4180 DRM_DEBUG("IH: HPD3\n");
4170 break; 4181 break;
4171 case 5: 4182 case 5:
4172 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { 4183 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
4173 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; 4184 DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
4174 queue_hotplug = true; 4185
4175 DRM_DEBUG("IH: HPD4\n"); 4186 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
4176 } 4187 queue_hotplug = true;
4188 DRM_DEBUG("IH: HPD4\n");
4177 break; 4189 break;
4178 case 10: 4190 case 10:
4179 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { 4191 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
4180 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; 4192 DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
4181 queue_hotplug = true; 4193
4182 DRM_DEBUG("IH: HPD5\n"); 4194 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
4183 } 4195 queue_hotplug = true;
4196 DRM_DEBUG("IH: HPD5\n");
4184 break; 4197 break;
4185 case 12: 4198 case 12:
4186 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { 4199 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
4187 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; 4200 DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
4188 queue_hotplug = true; 4201
4189 DRM_DEBUG("IH: HPD6\n"); 4202 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
4190 } 4203 queue_hotplug = true;
4204 DRM_DEBUG("IH: HPD6\n");
4205
4191 break; 4206 break;
4192 default: 4207 default:
4193 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4208 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
4197 case 21: /* hdmi */ 4212 case 21: /* hdmi */
4198 switch (src_data) { 4213 switch (src_data) {
4199 case 4: 4214 case 4:
4200 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { 4215 if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
4201 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; 4216 DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
4202 queue_hdmi = true; 4217
4203 DRM_DEBUG("IH: HDMI0\n"); 4218 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4204 } 4219 queue_hdmi = true;
4220 DRM_DEBUG("IH: HDMI0\n");
4221
4205 break; 4222 break;
4206 case 5: 4223 case 5:
4207 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { 4224 if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
4208 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; 4225 DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
4209 queue_hdmi = true; 4226
4210 DRM_DEBUG("IH: HDMI1\n"); 4227 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
4211 } 4228 queue_hdmi = true;
4229 DRM_DEBUG("IH: HDMI1\n");
4230
4212 break; 4231 break;
4213 default: 4232 default:
4214 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 4233 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
2483 struct drm_buf *buf; 2483 struct drm_buf *buf;
2484 u32 *buffer; 2484 u32 *buffer;
2485 const u8 __user *data; 2485 const u8 __user *data;
2486 int size, pass_size; 2486 unsigned int size, pass_size;
2487 u64 src_offset, dst_offset; 2487 u64 src_offset, dst_offset;
2488 2488
2489 if (!radeon_check_offset(dev_priv, tex->offset)) { 2489 if (!radeon_check_offset(dev_priv, tex->offset)) {
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
245static void radeon_audio_enable(struct radeon_device *rdev, 245static void radeon_audio_enable(struct radeon_device *rdev,
246 struct r600_audio_pin *pin, u8 enable_mask) 246 struct r600_audio_pin *pin, u8 enable_mask)
247{ 247{
248 struct drm_encoder *encoder;
249 struct radeon_encoder *radeon_encoder;
250 struct radeon_encoder_atom_dig *dig;
251 int pin_count = 0;
252
253 if (!pin)
254 return;
255
256 if (rdev->mode_info.mode_config_initialized) {
257 list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
258 if (radeon_encoder_is_digital(encoder)) {
259 radeon_encoder = to_radeon_encoder(encoder);
260 dig = radeon_encoder->enc_priv;
261 if (dig->pin == pin)
262 pin_count++;
263 }
264 }
265
266 if ((pin_count > 1) && (enable_mask == 0))
267 return;
268 }
269
248 if (rdev->audio.funcs->enable) 270 if (rdev->audio.funcs->enable)
249 rdev->audio.funcs->enable(rdev, pin, enable_mask); 271 rdev->audio.funcs->enable(rdev, pin, enable_mask);
250} 272}
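radeon_audio_enable() now refuses to turn a pin off while more than one digital encoder still references it (enable_mask == 0 with pin_count > 1 simply returns). The logic is a shared-resource guard; the sketch below models it with plain structs, where audio_pin and encoder_slot are invented names rather than driver types:

#include <stdbool.h>
#include <stdio.h>

struct audio_pin { int id; };

struct encoder_slot {
	bool digital;
	struct audio_pin *pin;
};

/* Return true when it is safe to apply enable_mask == 0: the pin is
 * unused or only one encoder still points at it. */
static bool may_disable(struct encoder_slot *encoders, int n,
			struct audio_pin *pin)
{
	int pin_count = 0;

	for (int i = 0; i < n; i++)
		if (encoders[i].digital && encoders[i].pin == pin)
			pin_count++;

	return pin_count <= 1;
}

int main(void)
{
	struct audio_pin pin = { .id = 0 };
	struct encoder_slot encoders[] = {
		{ .digital = true,  .pin = &pin },
		{ .digital = true,  .pin = &pin },
		{ .digital = false, .pin = NULL },
	};

	printf("safe to disable: %d\n",
	       may_disable(encoders, 3, &pin));	/* 0: pin still shared */
	return 0;
}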
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
336 358
337static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) 359static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
338{ 360{
339 struct radeon_encoder *radeon_encoder; 361 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
340 struct drm_connector *connector; 362 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
341 struct radeon_connector *radeon_connector = NULL;
342 struct cea_sad *sads; 363 struct cea_sad *sads;
343 int sad_count; 364 int sad_count;
344 365
345 list_for_each_entry(connector, 366 if (!connector)
346 &encoder->dev->mode_config.connector_list, head) {
347 if (connector->encoder == encoder) {
348 radeon_connector = to_radeon_connector(connector);
349 break;
350 }
351 }
352
353 if (!radeon_connector) {
354 DRM_ERROR("Couldn't find encoder's connector\n");
355 return; 367 return;
356 }
357 368
358 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); 369 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
359 if (sad_count <= 0) { 370 if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
362 } 373 }
363 BUG_ON(!sads); 374 BUG_ON(!sads);
364 375
365 radeon_encoder = to_radeon_encoder(encoder);
366
367 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) 376 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
368 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count); 377 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
369 378
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
372 381
373static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) 382static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
374{ 383{
384 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
375 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 385 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
376 struct drm_connector *connector;
377 struct radeon_connector *radeon_connector = NULL;
378 u8 *sadb = NULL; 386 u8 *sadb = NULL;
379 int sad_count; 387 int sad_count;
380 388
381 list_for_each_entry(connector, 389 if (!connector)
382 &encoder->dev->mode_config.connector_list, head) {
383 if (connector->encoder == encoder) {
384 radeon_connector = to_radeon_connector(connector);
385 break;
386 }
387 }
388
389 if (!radeon_connector) {
390 DRM_ERROR("Couldn't find encoder's connector\n");
391 return; 390 return;
392 }
393 391
394 sad_count = drm_edid_to_speaker_allocation( 392 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
395 radeon_connector_edid(connector), &sadb); 393 &sadb);
396 if (sad_count < 0) { 394 if (sad_count < 0) {
397 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", 395 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
398 sad_count); 396 sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
406} 404}
407 405
408static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, 406static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
409 struct drm_display_mode *mode) 407 struct drm_display_mode *mode)
410{ 408{
411 struct radeon_encoder *radeon_encoder; 409 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
412 struct drm_connector *connector; 410 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
413 struct radeon_connector *radeon_connector = 0;
414
415 list_for_each_entry(connector,
416 &encoder->dev->mode_config.connector_list, head) {
417 if (connector->encoder == encoder) {
418 radeon_connector = to_radeon_connector(connector);
419 break;
420 }
421 }
422 411
423 if (!radeon_connector) { 412 if (!connector)
424 DRM_ERROR("Couldn't find encoder's connector\n");
425 return; 413 return;
426 }
427
428 radeon_encoder = to_radeon_encoder(encoder);
429 414
430 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields) 415 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
431 radeon_encoder->audio->write_latency_fields(encoder, connector, mode); 416 radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
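The three functions above drop an open-coded walk of mode_config.connector_list in favour of radeon_get_connector_for_encoder() and quietly return when no connector is bound to the encoder. A userspace model of that refactor, where find_connector_for_encoder is a stand-in for the helper and the structs are simplified:

#include <stddef.h>
#include <stdio.h>

struct connector { int id; struct encoder *encoder; };
struct encoder   { int id; };

/* Stand-in for radeon_get_connector_for_encoder(): return the first
 * connector currently routed to @enc, or NULL if none is. */
static struct connector *find_connector_for_encoder(struct connector *list,
						    size_t n,
						    struct encoder *enc)
{
	for (size_t i = 0; i < n; i++)
		if (list[i].encoder == enc)
			return &list[i];
	return NULL;
}

int main(void)
{
	struct encoder enc = { .id = 1 };
	struct connector connectors[] = {
		{ .id = 10, .encoder = NULL },
		{ .id = 11, .encoder = &enc },
	};
	struct connector *c = find_connector_for_encoder(connectors, 2, &enc);

	printf("connector for encoder 1: %d\n", c ? c->id : -1);
	return 0;
}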
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
451} 436}
452 437
453void radeon_audio_detect(struct drm_connector *connector, 438void radeon_audio_detect(struct drm_connector *connector,
439 struct drm_encoder *encoder,
454 enum drm_connector_status status) 440 enum drm_connector_status status)
455{ 441{
456 struct radeon_device *rdev; 442 struct drm_device *dev = connector->dev;
457 struct radeon_encoder *radeon_encoder; 443 struct radeon_device *rdev = dev->dev_private;
444 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
458 struct radeon_encoder_atom_dig *dig; 445 struct radeon_encoder_atom_dig *dig;
459 446
460 if (!connector || !connector->encoder) 447 if (!radeon_audio_chipset_supported(rdev))
461 return; 448 return;
462 449
463 rdev = connector->encoder->dev->dev_private; 450 if (!radeon_encoder_is_digital(encoder))
464
465 if (!radeon_audio_chipset_supported(rdev))
466 return; 451 return;
467 452
468 radeon_encoder = to_radeon_encoder(connector->encoder);
469 dig = radeon_encoder->enc_priv; 453 dig = radeon_encoder->enc_priv;
470 454
471 if (status == connector_status_connected) { 455 if (status == connector_status_connected) {
472 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
473 radeon_encoder->audio = NULL;
474 return;
475 }
476
477 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 456 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
478 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 457 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 458
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
486 radeon_encoder->audio = rdev->audio.hdmi_funcs; 465 radeon_encoder->audio = rdev->audio.hdmi_funcs;
487 } 466 }
488 467
489 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 468 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
490 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 469 if (!dig->pin)
470 dig->pin = radeon_audio_get_pin(encoder);
471 radeon_audio_enable(rdev, dig->pin, 0xf);
472 } else {
473 radeon_audio_enable(rdev, dig->pin, 0);
474 dig->pin = NULL;
475 }
491 } else { 476 } else {
492 radeon_audio_enable(rdev, dig->afmt->pin, 0); 477 radeon_audio_enable(rdev, dig->pin, 0);
493 dig->afmt->pin = NULL; 478 dig->pin = NULL;
494 } 479 }
495} 480}
496 481
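The rewritten radeon_audio_detect() moves pin ownership from dig->afmt->pin to dig->pin and keys enable/disable on whether the sink's EDID actually advertises audio. A rough state sketch of that lifecycle, hedged as a model rather than the driver's exact code (pool, audio_enable and detect are invented names):

#include <stdbool.h>
#include <stdio.h>

struct pin { int id; };
struct dig { struct pin *pin; };

static struct pin pool = { .id = 0 };	/* pretend pin allocator */

static void audio_enable(struct pin *p, unsigned int mask)
{
	if (p)
		printf("pin %d enable mask 0x%x\n", p->id, mask);
}

/* Connected + audio-capable sink: claim a pin once and enable all
 * channels; otherwise disable whatever pin was held and drop it. */
static void detect(struct dig *dig, bool connected, bool has_audio)
{
	if (connected && has_audio) {
		if (!dig->pin)
			dig->pin = &pool;
		audio_enable(dig->pin, 0xf);
	} else {
		audio_enable(dig->pin, 0);
		dig->pin = NULL;
	}
}

int main(void)
{
	struct dig dig = { 0 };

	detect(&dig, true, true);	/* claim and enable */
	detect(&dig, false, false);	/* release */
	return 0;
}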
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
518} 503}
519 504
520static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, 505static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
521 struct drm_display_mode *mode) 506 struct drm_display_mode *mode)
522{ 507{
523 struct radeon_device *rdev = encoder->dev->dev_private; 508 struct radeon_device *rdev = encoder->dev->dev_private;
524 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 509 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
525 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 510 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
526 struct drm_connector *connector; 511 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
527 struct radeon_connector *radeon_connector = NULL;
528 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 512 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
529 struct hdmi_avi_infoframe frame; 513 struct hdmi_avi_infoframe frame;
530 int err; 514 int err;
531 515
532 list_for_each_entry(connector, 516 if (!connector)
533 &encoder->dev->mode_config.connector_list, head) { 517 return -EINVAL;
534 if (connector->encoder == encoder) {
535 radeon_connector = to_radeon_connector(connector);
536 break;
537 }
538 }
539
540 if (!radeon_connector) {
541 DRM_ERROR("Couldn't find encoder's connector\n");
542 return -ENOENT;
543 }
544 518
545 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 519 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
546 if (err < 0) { 520 if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
563 return err; 537 return err;
564 } 538 }
565 539
566 if (dig && dig->afmt && 540 if (dig && dig->afmt && radeon_encoder->audio &&
567 radeon_encoder->audio && radeon_encoder->audio->set_avi_packet) 541 radeon_encoder->audio->set_avi_packet)
568 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset, 542 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
569 buffer, sizeof(buffer)); 543 buffer, sizeof(buffer));
570 544
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
722{ 696{
723 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 697 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
724 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 698 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
699 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
725 700
726 if (!dig || !dig->afmt) 701 if (!dig || !dig->afmt)
727 return; 702 return;
728 703
729 radeon_audio_set_mute(encoder, true); 704 if (!connector)
705 return;
730 706
731 radeon_audio_write_speaker_allocation(encoder); 707 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
732 radeon_audio_write_sad_regs(encoder); 708 radeon_audio_set_mute(encoder, true);
733 radeon_audio_write_latency_fields(encoder, mode);
734 radeon_audio_set_dto(encoder, mode->clock);
735 radeon_audio_set_vbi_packet(encoder);
736 radeon_hdmi_set_color_depth(encoder);
737 radeon_audio_update_acr(encoder, mode->clock);
738 radeon_audio_set_audio_packet(encoder);
739 radeon_audio_select_pin(encoder);
740 709
741 if (radeon_audio_set_avi_packet(encoder, mode) < 0) 710 radeon_audio_write_speaker_allocation(encoder);
742 return; 711 radeon_audio_write_sad_regs(encoder);
712 radeon_audio_write_latency_fields(encoder, mode);
713 radeon_audio_set_dto(encoder, mode->clock);
714 radeon_audio_set_vbi_packet(encoder);
715 radeon_hdmi_set_color_depth(encoder);
716 radeon_audio_update_acr(encoder, mode->clock);
717 radeon_audio_set_audio_packet(encoder);
718 radeon_audio_select_pin(encoder);
719
720 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
721 return;
743 722
744 radeon_audio_set_mute(encoder, false); 723 radeon_audio_set_mute(encoder, false);
724 } else {
725 radeon_hdmi_set_color_depth(encoder);
726
727 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
728 return;
729 }
745} 730}
746 731
747static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, 732static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
748 struct drm_display_mode *mode) 733 struct drm_display_mode *mode)
749{ 734{
750 struct drm_device *dev = encoder->dev; 735 struct drm_device *dev = encoder->dev;
751 struct radeon_device *rdev = dev->dev_private; 736 struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
759 if (!dig || !dig->afmt) 744 if (!dig || !dig->afmt)
760 return; 745 return;
761 746
762 radeon_audio_write_speaker_allocation(encoder); 747 if (!connector)
763 radeon_audio_write_sad_regs(encoder);
764 radeon_audio_write_latency_fields(encoder, mode);
765 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
766 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
767 else
768 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
769 radeon_audio_set_audio_packet(encoder);
770 radeon_audio_select_pin(encoder);
771
772 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
773 return; 748 return;
749
750 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
751 radeon_audio_write_speaker_allocation(encoder);
752 radeon_audio_write_sad_regs(encoder);
753 radeon_audio_write_latency_fields(encoder, mode);
754 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
755 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
756 else
757 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
758 radeon_audio_set_audio_packet(encoder);
759 radeon_audio_select_pin(encoder);
760
761 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
762 return;
763 }
774} 764}
775 765
776void radeon_audio_mode_set(struct drm_encoder *encoder, 766void radeon_audio_mode_set(struct drm_encoder *encoder,
777 struct drm_display_mode *mode) 767 struct drm_display_mode *mode)
778{ 768{
779 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 769 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
780 770
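Both the HDMI and DP mode_set paths now program the audio block only when drm_detect_monitor_audio() reports an audio-capable sink; for HDMI the color depth and AVI infoframe are still written for video-only monitors. A compact approximation of that branching, where setup_audio and setup_video_only are invented helpers that stand for the groups of calls in the hunk:

#include <stdbool.h>
#include <stdio.h>

static void setup_audio(void)      { printf("program SADs, DTO, ACR, audio packet\n"); }
static void setup_video_only(void) { printf("program color depth + AVI infoframe\n"); }

static void hdmi_mode_set(bool have_connector, bool monitor_has_audio)
{
	if (!have_connector)
		return;

	if (monitor_has_audio) {
		setup_audio();
		setup_video_only();	/* audio path also programs the video bits */
	} else {
		setup_video_only();	/* video-only sink: skip the audio block */
	}
}

int main(void)
{
	hdmi_mode_set(true, true);
	hdmi_mode_set(true, false);
	return 0;
}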
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
68 68
69int radeon_audio_init(struct radeon_device *rdev); 69int radeon_audio_init(struct radeon_device *rdev);
70void radeon_audio_detect(struct drm_connector *connector, 70void radeon_audio_detect(struct drm_connector *connector,
71 enum drm_connector_status status); 71 struct drm_encoder *encoder,
72 enum drm_connector_status status);
72u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, 73u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
73 u32 offset, u32 reg); 74 u32 offset, u32 reg);
74void radeon_audio_endpoint_wreg(struct radeon_device *rdev, 75void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1255 1255
1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && 1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { 1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
1258 u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
1259
1260 if (hss > lvds->native_mode.hdisplay)
1261 hss = (10 - 1) * 8;
1262
1258 lvds->native_mode.htotal = lvds->native_mode.hdisplay + 1263 lvds->native_mode.htotal = lvds->native_mode.hdisplay +
1259 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; 1264 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
1260 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + 1265 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
1261 (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; 1266 hss;
1262 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + 1267 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
1263 (RBIOS8(tmp + 23) * 8); 1268 (RBIOS8(tmp + 23) * 8);
1264 1269
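The radeon_combios.c hunk computes the BIOS-derived horizontal sync start offset once, and falls back to a fixed (10 - 1) * 8 pixel offset when the value would land beyond hdisplay. A small standalone sketch of the clamp, with bios_hss standing in for the RBIOS16 arithmetic:

#include <stdio.h>

/* Clamp a BIOS-provided hsync-start offset: anything larger than the
 * active width is treated as bogus and replaced by a small default. */
static unsigned int sane_hss(unsigned int bios_hss, unsigned int hdisplay)
{
	if (bios_hss > hdisplay)
		return (10 - 1) * 8;
	return bios_hss;
}

int main(void)
{
	printf("hss = %u\n", sane_hss(48, 1024));	/* plausible value kept */
	printf("hss = %u\n", sane_hss(65528, 1024));	/* garbage replaced by 72 */
	return 0;
}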
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) 1382 if ((radeon_audio != 0) && radeon_connector->use_digital) {
1383 radeon_audio_detect(connector, ret); 1383 const struct drm_connector_helper_funcs *connector_funcs =
1384 connector->helper_private;
1385
1386 encoder = connector_funcs->best_encoder(connector);
1387 if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
1388 radeon_connector_get_edid(connector);
1389 radeon_audio_detect(connector, encoder, ret);
1390 }
1391 }
1384 1392
1385exit: 1393exit:
1386 pm_runtime_mark_last_busy(connector->dev->dev); 1394 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1717 1725
1718 radeon_connector_update_scratch_regs(connector, ret); 1726 radeon_connector_update_scratch_regs(connector, ret);
1719 1727
1720 if (radeon_audio != 0) 1728 if ((radeon_audio != 0) && encoder) {
1721 radeon_audio_detect(connector, ret); 1729 radeon_connector_get_edid(connector);
1730 radeon_audio_detect(connector, encoder, ret);
1731 }
1722 1732
1723out: 1733out:
1724 pm_runtime_mark_last_busy(connector->dev->dev); 1734 pm_runtime_mark_last_busy(connector->dev->dev);
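Both detect paths in radeon_connectors.c now refresh the connector's EDID and pass the specific encoder to radeon_audio_detect(); the non-DP path additionally requires a digital connector whose best encoder is a TMDS encoder. The gating condition reduces to the small predicate below (audio_detect_wanted is an invented name for illustration):

#include <stdbool.h>
#include <stdio.h>

enum encoder_type { ENCODER_DAC, ENCODER_TMDS };

/* Should the HDMI/DVI detect path hand this connector to the audio
 * code?  Mirrors the new condition: module audio enabled, digital
 * signal in use, and the chosen encoder is a TMDS block. */
static bool audio_detect_wanted(int radeon_audio, bool use_digital,
				enum encoder_type best)
{
	return radeon_audio != 0 && use_digital && best == ENCODER_TMDS;
}

int main(void)
{
	printf("%d\n", audio_detect_wanted(1, true,  ENCODER_TMDS));	/* 1 */
	printf("%d\n", audio_detect_wanted(1, false, ENCODER_DAC));	/* 0 */
	return 0;
}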
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
91 struct radeon_device *rdev = crtc->dev->dev_private; 91 struct radeon_device *rdev = crtc->dev->dev_private;
92 92
93 if (ASIC_IS_DCE4(rdev)) { 93 if (ASIC_IS_DCE4(rdev)) {
94 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
95 upper_32_bits(radeon_crtc->cursor_addr));
96 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
97 lower_32_bits(radeon_crtc->cursor_addr));
94 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); 98 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
95 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | 99 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
96 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | 100 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
97 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); 101 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
98 } else if (ASIC_IS_AVIVO(rdev)) { 102 } else if (ASIC_IS_AVIVO(rdev)) {
103 if (rdev->family >= CHIP_RV770) {
104 if (radeon_crtc->crtc_id)
105 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
106 upper_32_bits(radeon_crtc->cursor_addr));
107 else
108 WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
109 upper_32_bits(radeon_crtc->cursor_addr));
110 }
111
112 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
113 lower_32_bits(radeon_crtc->cursor_addr));
99 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 114 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
100 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | 115 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
101 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 116 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
102 } else { 117 } else {
118 /* offset is from DISP(2)_BASE_ADDRESS */
119 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
120 radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
121
103 switch (radeon_crtc->crtc_id) { 122 switch (radeon_crtc->crtc_id) {
104 case 0: 123 case 0:
105 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); 124 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
205 | (x << 16) 224 | (x << 16)
206 | y)); 225 | y));
207 /* offset is from DISP(2)_BASE_ADDRESS */ 226 /* offset is from DISP(2)_BASE_ADDRESS */
208 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + 227 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
209 (yorigin * 256))); 228 radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
229 yorigin * 256);
210 } 230 }
211 231
212 radeon_crtc->cursor_x = x; 232 radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
227 return ret; 247 return ret;
228} 248}
229 249
230static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
231{
232 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
233 struct radeon_device *rdev = crtc->dev->dev_private;
234 struct radeon_bo *robj = gem_to_radeon_bo(obj);
235 uint64_t gpu_addr;
236 int ret;
237
238 ret = radeon_bo_reserve(robj, false);
239 if (unlikely(ret != 0))
240 goto fail;
241 /* Only 27 bit offset for legacy cursor */
242 ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
243 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
244 &gpu_addr);
245 radeon_bo_unreserve(robj);
246 if (ret)
247 goto fail;
248
249 if (ASIC_IS_DCE4(rdev)) {
250 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
251 upper_32_bits(gpu_addr));
252 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
253 gpu_addr & 0xffffffff);
254 } else if (ASIC_IS_AVIVO(rdev)) {
255 if (rdev->family >= CHIP_RV770) {
256 if (radeon_crtc->crtc_id)
257 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
258 else
259 WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
260 }
261 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
262 gpu_addr & 0xffffffff);
263 } else {
264 radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
265 /* offset is from DISP(2)_BASE_ADDRESS */
266 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
267 }
268
269 return 0;
270
271fail:
272 drm_gem_object_unreference_unlocked(obj);
273
274 return ret;
275}
276
277int radeon_crtc_cursor_set2(struct drm_crtc *crtc, 250int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
278 struct drm_file *file_priv, 251 struct drm_file *file_priv,
279 uint32_t handle, 252 uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
283 int32_t hot_y) 256 int32_t hot_y)
284{ 257{
285 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 258 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
259 struct radeon_device *rdev = crtc->dev->dev_private;
286 struct drm_gem_object *obj; 260 struct drm_gem_object *obj;
261 struct radeon_bo *robj;
287 int ret; 262 int ret;
288 263
289 if (!handle) { 264 if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
305 return -ENOENT; 280 return -ENOENT;
306 } 281 }
307 282
283 robj = gem_to_radeon_bo(obj);
284 ret = radeon_bo_reserve(robj, false);
285 if (ret != 0) {
286 drm_gem_object_unreference_unlocked(obj);
287 return ret;
288 }
289 /* Only 27 bit offset for legacy cursor */
290 ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
291 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
292 &radeon_crtc->cursor_addr);
293 radeon_bo_unreserve(robj);
294 if (ret) {
295 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
296 drm_gem_object_unreference_unlocked(obj);
297 return ret;
298 }
299
308 radeon_crtc->cursor_width = width; 300 radeon_crtc->cursor_width = width;
309 radeon_crtc->cursor_height = height; 301 radeon_crtc->cursor_height = height;
310 302
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
323 radeon_crtc->cursor_hot_y = hot_y; 315 radeon_crtc->cursor_hot_y = hot_y;
324 } 316 }
325 317
326 ret = radeon_set_cursor(crtc, obj); 318 radeon_show_cursor(crtc);
327
328 if (ret)
329 DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
330 ret);
331 else
332 radeon_show_cursor(crtc);
333 319
334 radeon_lock_cursor(crtc, false); 320 radeon_lock_cursor(crtc, false);
335 321
@@ -341,8 +327,7 @@ unpin:
341 radeon_bo_unpin(robj); 327 radeon_bo_unpin(robj);
342 radeon_bo_unreserve(robj); 328 radeon_bo_unreserve(robj);
343 } 329 }
344 if (radeon_crtc->cursor_bo != obj) 330 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
345 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
346 } 331 }
347 332
348 radeon_crtc->cursor_bo = obj; 333 radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
360void radeon_cursor_reset(struct drm_crtc *crtc) 345void radeon_cursor_reset(struct drm_crtc *crtc)
361{ 346{
362 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 347 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
363 int ret;
364 348
365 if (radeon_crtc->cursor_bo) { 349 if (radeon_crtc->cursor_bo) {
366 radeon_lock_cursor(crtc, true); 350 radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
368 radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x, 352 radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
369 radeon_crtc->cursor_y); 353 radeon_crtc->cursor_y);
370 354
371 ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo); 355 radeon_show_cursor(crtc);
372 if (ret)
373 DRM_ERROR("radeon_set_cursor returned %d, not showing "
374 "cursor\n", ret);
375 else
376 radeon_show_cursor(crtc);
377 356
378 radeon_lock_cursor(crtc, false); 357 radeon_lock_cursor(crtc, false);
379 } 358 }
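Across the radeon_cursor.c hunks, radeon_set_cursor() is removed: the BO is pinned in radeon_crtc_cursor_set2() (and again on resume), its GPU address is cached in radeon_crtc->cursor_addr, and radeon_show_cursor() programs the surface address registers from that cached value. A rough model of the split, where pin_cursor_bo and show_cursor are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

struct crtc { uint64_t cursor_addr; };

/* Stage 1: pin the buffer once and remember where it ended up. */
static int pin_cursor_bo(struct crtc *crtc, uint64_t gpu_addr)
{
	crtc->cursor_addr = gpu_addr;
	return 0;
}

/* Stage 2: showing the cursor only rewrites registers from the cached
 * address, so it can run again (e.g. in cursor_reset) without pinning. */
static void show_cursor(const struct crtc *crtc)
{
	printf("CUR_SURFACE_ADDRESS_HIGH = 0x%08x\n",
	       (uint32_t)(crtc->cursor_addr >> 32));
	printf("CUR_SURFACE_ADDRESS      = 0x%08x\n",
	       (uint32_t)crtc->cursor_addr);
}

int main(void)
{
	struct crtc crtc = { 0 };

	pin_cursor_bo(&crtc, 0x100200000ULL);
	show_cursor(&crtc);
	return 0;
}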
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b1168bd6..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
1080} 1080}
1081 1081
1082/** 1082/**
1083 * Determine a sensible default GART size according to ASIC family.
1084 *
1085 * @family ASIC family name
1086 */
1087static int radeon_gart_size_auto(enum radeon_family family)
1088{
1089 /* default to a larger gart size on newer asics */
1090 if (family >= CHIP_TAHITI)
1091 return 2048;
1092 else if (family >= CHIP_RV770)
1093 return 1024;
1094 else
1095 return 512;
1096}
1097
1098/**
1083 * radeon_check_arguments - validate module params 1099 * radeon_check_arguments - validate module params
1084 * 1100 *
1085 * @rdev: radeon_device pointer 1101 * @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1097 } 1113 }
1098 1114
1099 if (radeon_gart_size == -1) { 1115 if (radeon_gart_size == -1) {
1100 /* default to a larger gart size on newer asics */ 1116 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1101 if (rdev->family >= CHIP_RV770)
1102 radeon_gart_size = 1024;
1103 else
1104 radeon_gart_size = 512;
1105 } 1117 }
1106 /* gtt size must be power of two and greater or equal to 32M */ 1118 /* gtt size must be power of two and greater or equal to 32M */
1107 if (radeon_gart_size < 32) { 1119 if (radeon_gart_size < 32) {
1108 dev_warn(rdev->dev, "gart size (%d) too small\n", 1120 dev_warn(rdev->dev, "gart size (%d) too small\n",
1109 radeon_gart_size); 1121 radeon_gart_size);
1110 if (rdev->family >= CHIP_RV770) 1122 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1111 radeon_gart_size = 1024;
1112 else
1113 radeon_gart_size = 512;
1114 } else if (!radeon_check_pot_argument(radeon_gart_size)) { 1123 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1115 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 1124 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1116 radeon_gart_size); 1125 radeon_gart_size);
1117 if (rdev->family >= CHIP_RV770) 1126 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1118 radeon_gart_size = 1024;
1119 else
1120 radeon_gart_size = 512;
1121 } 1127 }
1122 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 1128 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1123 1129
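radeon_gart_size_auto() folds the three duplicated family checks in radeon_check_arguments() into one helper and, as a side effect, raises the default to 2048 MB for CHIP_TAHITI and newer. A standalone version of the same selection logic; the enum values here are placeholders and only their ordering matters:

#include <stdio.h>

enum family { CHIP_R600, CHIP_RV770, CHIP_TAHITI };	/* ordering only */

static int gart_size_auto(enum family family)
{
	if (family >= CHIP_TAHITI)
		return 2048;	/* MB, newer SI parts */
	else if (family >= CHIP_RV770)
		return 1024;
	else
		return 512;
}

int main(void)
{
	printf("R600:   %d MB\n", gart_size_auto(CHIP_R600));
	printf("RV770:  %d MB\n", gart_size_auto(CHIP_RV770));
	printf("TAHITI: %d MB\n", gart_size_auto(CHIP_TAHITI));
	return 0;
}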
@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1572 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1578 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1573 } 1579 }
1574 1580
1575 /* unpin the front buffers */ 1581 /* unpin the front buffers and cursors */
1576 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1582 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1583 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1577 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); 1584 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
1578 struct radeon_bo *robj; 1585 struct radeon_bo *robj;
1579 1586
1587 if (radeon_crtc->cursor_bo) {
1588 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1589 r = radeon_bo_reserve(robj, false);
1590 if (r == 0) {
1591 radeon_bo_unpin(robj);
1592 radeon_bo_unreserve(robj);
1593 }
1594 }
1595
1580 if (rfb == NULL || rfb->obj == NULL) { 1596 if (rfb == NULL || rfb->obj == NULL) {
1581 continue; 1597 continue;
1582 } 1598 }
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1639{ 1655{
1640 struct drm_connector *connector; 1656 struct drm_connector *connector;
1641 struct radeon_device *rdev = dev->dev_private; 1657 struct radeon_device *rdev = dev->dev_private;
1658 struct drm_crtc *crtc;
1642 int r; 1659 int r;
1643 1660
1644 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1661 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1678 1695
1679 radeon_restore_bios_scratch_regs(rdev); 1696 radeon_restore_bios_scratch_regs(rdev);
1680 1697
1698 /* pin cursors */
1699 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1700 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1701
1702 if (radeon_crtc->cursor_bo) {
1703 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1704 r = radeon_bo_reserve(robj, false);
1705 if (r == 0) {
1706 /* Only 27 bit offset for legacy cursor */
1707 r = radeon_bo_pin_restricted(robj,
1708 RADEON_GEM_DOMAIN_VRAM,
1709 ASIC_IS_AVIVO(rdev) ?
1710 0 : 1 << 27,
1711 &radeon_crtc->cursor_addr);
1712 if (r != 0)
1713 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1714 radeon_bo_unreserve(robj);
1715 }
1716 }
1717 }
1718
1681 /* init dig PHYs, disp eng pll */ 1719 /* init dig PHYs, disp eng pll */
1682 if (rdev->is_atom_bios) { 1720 if (rdev->is_atom_bios) {
1683 radeon_atom_encoder_init(rdev); 1721 radeon_atom_encoder_init(rdev);
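Suspend now unpins each CRTC's cursor BO alongside the front buffer, and resume re-pins it into VRAM (with the 27-bit offset restriction on pre-AVIVO parts) before the display hardware is re-initialised. A sketch of the resume-side loop only, with the reserve/pin/unreserve calls reduced to toy functions and made-up return values:

#include <stdbool.h>
#include <stdio.h>

struct cursor_bo { bool reserved; unsigned long long addr; };

static int  bo_reserve(struct cursor_bo *bo)   { bo->reserved = true;  return 0; }
static void bo_unreserve(struct cursor_bo *bo) { bo->reserved = false; }

/* Toy stand-in for radeon_bo_pin_restricted(): place the BO and report
 * its address, failing if it lands outside the allowed window. */
static int bo_pin_restricted(struct cursor_bo *bo, unsigned long long max,
			     unsigned long long *addr)
{
	bo->addr = 0x200000;		/* pretend VRAM placement */
	if (max && bo->addr >= max)
		return -22;		/* outside the legacy window */
	*addr = bo->addr;
	return 0;
}

int main(void)
{
	struct cursor_bo bo = { 0 };
	unsigned long long cursor_addr = 0;
	bool legacy = true;

	if (bo_reserve(&bo) == 0) {
		int r = bo_pin_restricted(&bo, legacy ? 1ULL << 27 : 0,
					  &cursor_addr);
		if (r)
			printf("Failed to pin cursor BO (%d)\n", r);
		bo_unreserve(&bo);
	}
	printf("cursor_addr = 0x%llx\n", cursor_addr);
	return 0;
}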
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 634793ea8418..aeb676708e60 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
257 } 257 }
258 258
259 info->par = rfbdev; 259 info->par = rfbdev;
260 info->skip_vt_switch = true;
260 261
261 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); 262 ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
262 if (ret) { 263 if (ret) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 5450fa95a47e..c4777c8d0312 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
260 } 260 }
261 } 261 }
262 } 262 }
263 mb(); 263 if (rdev->gart.ptr) {
264 radeon_gart_tlb_flush(rdev); 264 mb();
265 radeon_gart_tlb_flush(rdev);
266 }
265} 267}
266 268
267/** 269/**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
306 page_base += RADEON_GPU_PAGE_SIZE; 308 page_base += RADEON_GPU_PAGE_SIZE;
307 } 309 }
308 } 310 }
309 mb(); 311 if (rdev->gart.ptr) {
310 radeon_gart_tlb_flush(rdev); 312 mb();
313 radeon_gart_tlb_flush(rdev);
314 }
311 return 0; 315 return 0;
312} 316}
313 317
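Both radeon_gart_unbind() and radeon_gart_bind() now skip the memory barrier and TLB flush when rdev->gart.ptr is NULL, i.e. when there is no CPU mapping of the page table to have written through. The guard itself is tiny; __sync_synchronize() stands in for the kernel's mb():

#include <stdio.h>

struct gart { void *ptr; };	/* CPU mapping of the page table, may be NULL */

static void tlb_flush(void) { printf("GART TLB flushed\n"); }

static void gart_update_done(const struct gart *gart)
{
	/* Only order the writes and flush when the table is actually
	 * mapped; otherwise there is nothing the GPU could re-read. */
	if (gart->ptr) {
		__sync_synchronize();
		tlb_flush();
	}
}

int main(void)
{
	struct gart live = { .ptr = (void *)0x1 }, dead = { .ptr = NULL };

	gart_update_done(&live);
	gart_update_done(&dead);	/* silently skipped */
	return 0;
}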
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..3dcc5733ff69 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
36 if (robj) { 36 if (robj) {
37 if (robj->gem_base.import_attach) 37 if (robj->gem_base.import_attach)
38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); 38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
39 radeon_mn_unregister(robj);
39 radeon_bo_unref(&robj); 40 radeon_bo_unref(&robj);
40 } 41 }
41} 42}
@@ -428,7 +429,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
428int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 429int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
429 struct drm_file *filp) 430 struct drm_file *filp)
430{ 431{
431 struct radeon_device *rdev = dev->dev_private;
432 struct drm_radeon_gem_busy *args = data; 432 struct drm_radeon_gem_busy *args = data;
433 struct drm_gem_object *gobj; 433 struct drm_gem_object *gobj;
434 struct radeon_bo *robj; 434 struct radeon_bo *robj;
@@ -440,10 +440,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
440 return -ENOENT; 440 return -ENOENT;
441 } 441 }
442 robj = gem_to_radeon_bo(gobj); 442 robj = gem_to_radeon_bo(gobj);
443 r = radeon_bo_wait(robj, &cur_placement, true); 443
444 r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
445 if (r == 0)
446 r = -EBUSY;
447 else
448 r = 0;
449
450 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
444 args->domain = radeon_mem_type_to_domain(cur_placement); 451 args->domain = radeon_mem_type_to_domain(cur_placement);
445 drm_gem_object_unreference_unlocked(gobj); 452 drm_gem_object_unreference_unlocked(gobj);
446 r = radeon_gem_handle_lockup(rdev, r);
447 return r; 453 return r;
448} 454}
449 455
@@ -471,6 +477,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
471 r = ret; 477 r = ret;
472 478
473 /* Flush HDP cache via MMIO if necessary */ 479 /* Flush HDP cache via MMIO if necessary */
480 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
474 if (rdev->asic->mmio_hdp_flush && 481 if (rdev->asic->mmio_hdp_flush &&
475 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 482 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
476 robj->rdev->asic->mmio_hdp_flush(rdev); 483 robj->rdev->asic->mmio_hdp_flush(rdev);
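radeon_gem_busy_ioctl() no longer calls radeon_bo_wait(): it asks the reservation object whether all fences have signalled, maps "not yet" to -EBUSY, and samples the current placement with ACCESS_ONCE, so the ioctl can no longer block or trigger the lockup handler. The translation from the boolean test to the ioctl's return value is just:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Convert a "have all fences signalled?" answer into the busy-ioctl
 * convention: 0 means idle, -EBUSY means still busy. */
static int busy_status(bool all_fences_signaled)
{
	return all_fences_signaled ? 0 : -EBUSY;
}

int main(void)
{
	printf("idle object: %d\n", busy_status(true));
	printf("busy object: %d\n", busy_status(false));
	return 0;
}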
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
79 struct drm_mode_config *mode_config = &dev->mode_config; 79 struct drm_mode_config *mode_config = &dev->mode_config;
80 struct drm_connector *connector; 80 struct drm_connector *connector;
81 81
82 /* we can race here at startup, some boards seem to trigger
83 * hotplug irqs when they shouldn't. */
84 if (!rdev->mode_info.mode_config_initialized)
85 return;
86
82 mutex_lock(&mode_config->mutex); 87 mutex_lock(&mode_config->mutex);
83 if (mode_config->num_connector) { 88 if (mode_config->num_connector) {
84 list_for_each_entry(connector, &mode_config->connector_list, head) 89 list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
237 int offset; 237 int offset;
238 bool last_buffer_filled_status; 238 bool last_buffer_filled_status;
239 int id; 239 int id;
240 struct r600_audio_pin *pin;
241}; 240};
242 241
243struct radeon_mode_info { 242struct radeon_mode_info {
@@ -343,7 +342,6 @@ struct radeon_crtc {
343 int max_cursor_width; 342 int max_cursor_width;
344 int max_cursor_height; 343 int max_cursor_height;
345 uint32_t legacy_display_base_addr; 344 uint32_t legacy_display_base_addr;
346 uint32_t legacy_cursor_offset;
347 enum radeon_rmx_type rmx_type; 345 enum radeon_rmx_type rmx_type;
348 u8 h_border; 346 u8 h_border;
349 u8 v_border; 347 u8 v_border;
@@ -440,6 +438,7 @@ struct radeon_encoder_atom_dig {
440 uint8_t backlight_level; 438 uint8_t backlight_level;
441 int panel_mode; 439 int panel_mode;
442 struct radeon_afmt *afmt; 440 struct radeon_afmt *afmt;
441 struct r600_audio_pin *pin;
443 int active_mst_links; 442 int active_mst_links;
444}; 443};
445 444
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 318165d4855c..676362769b8d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
75 bo = container_of(tbo, struct radeon_bo, tbo); 75 bo = container_of(tbo, struct radeon_bo, tbo);
76 76
77 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); 77 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
78 radeon_mn_unregister(bo);
79 78
80 mutex_lock(&bo->rdev->gem.mutex); 79 mutex_lock(&bo->rdev->gem.mutex);
81 list_del_init(&bo->list); 80 list_del_init(&bo->list);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ec10533a49b8..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
493 } 493 }
494 494
495 if (bo_va->it.start || bo_va->it.last) { 495 if (bo_va->it.start || bo_va->it.last) {
496 spin_lock(&vm->status_lock); 496 /* add a clone of the bo_va to clear the old address */
497 if (list_empty(&bo_va->vm_status)) { 497 struct radeon_bo_va *tmp;
498 /* add a clone of the bo_va to clear the old address */ 498 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
499 struct radeon_bo_va *tmp; 499 if (!tmp) {
500 spin_unlock(&vm->status_lock); 500 mutex_unlock(&vm->mutex);
501 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 501 r = -ENOMEM;
502 if (!tmp) { 502 goto error_unreserve;
503 mutex_unlock(&vm->mutex);
504 r = -ENOMEM;
505 goto error_unreserve;
506 }
507 tmp->it.start = bo_va->it.start;
508 tmp->it.last = bo_va->it.last;
509 tmp->vm = vm;
510 tmp->bo = radeon_bo_ref(bo_va->bo);
511 spin_lock(&vm->status_lock);
512 list_add(&tmp->vm_status, &vm->freed);
513 } 503 }
514 spin_unlock(&vm->status_lock); 504 tmp->it.start = bo_va->it.start;
505 tmp->it.last = bo_va->it.last;
506 tmp->vm = vm;
507 tmp->bo = radeon_bo_ref(bo_va->bo);
515 508
516 interval_tree_remove(&bo_va->it, &vm->va); 509 interval_tree_remove(&bo_va->it, &vm->va);
510 spin_lock(&vm->status_lock);
517 bo_va->it.start = 0; 511 bo_va->it.start = 0;
518 bo_va->it.last = 0; 512 bo_va->it.last = 0;
513 list_del_init(&bo_va->vm_status);
514 list_add(&tmp->vm_status, &vm->freed);
515 spin_unlock(&vm->status_lock);
519 } 516 }
520 517
521 if (soffset || eoffset) { 518 if (soffset || eoffset) {
519 spin_lock(&vm->status_lock);
522 bo_va->it.start = soffset; 520 bo_va->it.start = soffset;
523 bo_va->it.last = eoffset - 1; 521 bo_va->it.last = eoffset - 1;
524 interval_tree_insert(&bo_va->it, &vm->va);
525 spin_lock(&vm->status_lock);
526 list_add(&bo_va->vm_status, &vm->cleared); 522 list_add(&bo_va->vm_status, &vm->cleared);
527 spin_unlock(&vm->status_lock); 523 spin_unlock(&vm->status_lock);
524 interval_tree_insert(&bo_va->it, &vm->va);
528 } 525 }
529 526
530 bo_va->flags = flags; 527 bo_va->flags = flags;
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1158 1155
1159 list_for_each_entry(bo_va, &bo->va, bo_list) { 1156 list_for_each_entry(bo_va, &bo->va, bo_list) {
1160 spin_lock(&bo_va->vm->status_lock); 1157 spin_lock(&bo_va->vm->status_lock);
1161 if (list_empty(&bo_va->vm_status)) 1158 if (list_empty(&bo_va->vm_status) &&
1159 (bo_va->it.start || bo_va->it.last))
1162 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); 1160 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1163 spin_unlock(&bo_va->vm->status_lock); 1161 spin_unlock(&bo_va->vm->status_lock);
1164 } 1162 }
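In the radeon_vm.c hunks, radeon_vm_bo_set_addr() now always allocates the clone of the old mapping and adds it to vm->freed (the old code skipped that when the bo_va was already on a status list), and radeon_vm_bo_invalidate() only queues a bo_va when it actually covers an address range. A toy model of the second condition, with plain fields in place of the list and interval-tree machinery:

#include <stdbool.h>
#include <stdio.h>

struct bo_va {
	unsigned long start, last;	/* interval covered; 0/0 = unmapped */
	bool on_status_list;
};

/* Mirror of the new radeon_vm_bo_invalidate() test: only queue a
 * mapping for update if it is not already queued and it actually
 * maps something. */
static bool should_invalidate(const struct bo_va *v)
{
	return !v->on_status_list && (v->start || v->last);
}

int main(void)
{
	struct bo_va mapped   = { .start = 0x1000, .last = 0x1fff };
	struct bo_va unmapped = { 0 };

	printf("mapped:   %d\n", should_invalidate(&mapped));	/* 1 */
	printf("unmapped: %d\n", should_invalidate(&unmapped));	/* 0 */
	return 0;
}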
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
6466 case 1: /* D1 vblank/vline */ 6466 case 1: /* D1 vblank/vline */
6467 switch (src_data) { 6467 switch (src_data) {
6468 case 0: /* D1 vblank */ 6468 case 0: /* D1 vblank */
6469 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { 6469 if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
6470 if (rdev->irq.crtc_vblank_int[0]) { 6470 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6471 drm_handle_vblank(rdev->ddev, 0); 6471
6472 rdev->pm.vblank_sync = true; 6472 if (rdev->irq.crtc_vblank_int[0]) {
6473 wake_up(&rdev->irq.vblank_queue); 6473 drm_handle_vblank(rdev->ddev, 0);
6474 } 6474 rdev->pm.vblank_sync = true;
6475 if (atomic_read(&rdev->irq.pflip[0])) 6475 wake_up(&rdev->irq.vblank_queue);
6476 radeon_crtc_handle_vblank(rdev, 0);
6477 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6478 DRM_DEBUG("IH: D1 vblank\n");
6479 } 6476 }
6477 if (atomic_read(&rdev->irq.pflip[0]))
6478 radeon_crtc_handle_vblank(rdev, 0);
6479 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6480 DRM_DEBUG("IH: D1 vblank\n");
6481
6480 break; 6482 break;
6481 case 1: /* D1 vline */ 6483 case 1: /* D1 vline */
6482 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { 6484 if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
6483 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; 6485 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6484 DRM_DEBUG("IH: D1 vline\n"); 6486
6485 } 6487 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
6488 DRM_DEBUG("IH: D1 vline\n");
6489
6486 break; 6490 break;
6487 default: 6491 default:
6488 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6492 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
6492 case 2: /* D2 vblank/vline */ 6496 case 2: /* D2 vblank/vline */
6493 switch (src_data) { 6497 switch (src_data) {
6494 case 0: /* D2 vblank */ 6498 case 0: /* D2 vblank */
6495 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 6499 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
6496 if (rdev->irq.crtc_vblank_int[1]) { 6500 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6497 drm_handle_vblank(rdev->ddev, 1); 6501
6498 rdev->pm.vblank_sync = true; 6502 if (rdev->irq.crtc_vblank_int[1]) {
6499 wake_up(&rdev->irq.vblank_queue); 6503 drm_handle_vblank(rdev->ddev, 1);
6500 } 6504 rdev->pm.vblank_sync = true;
6501 if (atomic_read(&rdev->irq.pflip[1])) 6505 wake_up(&rdev->irq.vblank_queue);
6502 radeon_crtc_handle_vblank(rdev, 1);
6503 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6504 DRM_DEBUG("IH: D2 vblank\n");
6505 } 6506 }
6507 if (atomic_read(&rdev->irq.pflip[1]))
6508 radeon_crtc_handle_vblank(rdev, 1);
6509 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6510 DRM_DEBUG("IH: D2 vblank\n");
6511
6506 break; 6512 break;
6507 case 1: /* D2 vline */ 6513 case 1: /* D2 vline */
6508 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 6514 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
6509 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 6515 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6510 DRM_DEBUG("IH: D2 vline\n"); 6516
6511 } 6517 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
6518 DRM_DEBUG("IH: D2 vline\n");
6519
6512 break; 6520 break;
6513 default: 6521 default:
6514 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6522 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
6518 case 3: /* D3 vblank/vline */ 6526 case 3: /* D3 vblank/vline */
6519 switch (src_data) { 6527 switch (src_data) {
6520 case 0: /* D3 vblank */ 6528 case 0: /* D3 vblank */
6521 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 6529 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
6522 if (rdev->irq.crtc_vblank_int[2]) { 6530 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6523 drm_handle_vblank(rdev->ddev, 2); 6531
6524 rdev->pm.vblank_sync = true; 6532 if (rdev->irq.crtc_vblank_int[2]) {
6525 wake_up(&rdev->irq.vblank_queue); 6533 drm_handle_vblank(rdev->ddev, 2);
6526 } 6534 rdev->pm.vblank_sync = true;
6527 if (atomic_read(&rdev->irq.pflip[2])) 6535 wake_up(&rdev->irq.vblank_queue);
6528 radeon_crtc_handle_vblank(rdev, 2);
6529 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6530 DRM_DEBUG("IH: D3 vblank\n");
6531 } 6536 }
6537 if (atomic_read(&rdev->irq.pflip[2]))
6538 radeon_crtc_handle_vblank(rdev, 2);
6539 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6540 DRM_DEBUG("IH: D3 vblank\n");
6541
6532 break; 6542 break;
6533 case 1: /* D3 vline */ 6543 case 1: /* D3 vline */
6534 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 6544 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
6535 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 6545 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6536 DRM_DEBUG("IH: D3 vline\n"); 6546
6537 } 6547 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
6548 DRM_DEBUG("IH: D3 vline\n");
6549
6538 break; 6550 break;
6539 default: 6551 default:
6540 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6552 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
6544 case 4: /* D4 vblank/vline */ 6556 case 4: /* D4 vblank/vline */
6545 switch (src_data) { 6557 switch (src_data) {
6546 case 0: /* D4 vblank */ 6558 case 0: /* D4 vblank */
6547 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 6559 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
6548 if (rdev->irq.crtc_vblank_int[3]) { 6560 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6549 drm_handle_vblank(rdev->ddev, 3); 6561
6550 rdev->pm.vblank_sync = true; 6562 if (rdev->irq.crtc_vblank_int[3]) {
6551 wake_up(&rdev->irq.vblank_queue); 6563 drm_handle_vblank(rdev->ddev, 3);
6552 } 6564 rdev->pm.vblank_sync = true;
6553 if (atomic_read(&rdev->irq.pflip[3])) 6565 wake_up(&rdev->irq.vblank_queue);
6554 radeon_crtc_handle_vblank(rdev, 3);
6555 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6556 DRM_DEBUG("IH: D4 vblank\n");
6557 } 6566 }
6567 if (atomic_read(&rdev->irq.pflip[3]))
6568 radeon_crtc_handle_vblank(rdev, 3);
6569 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6570 DRM_DEBUG("IH: D4 vblank\n");
6571
6558 break; 6572 break;
6559 case 1: /* D4 vline */ 6573 case 1: /* D4 vline */
6560 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 6574 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
6561 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 6575 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6562 DRM_DEBUG("IH: D4 vline\n"); 6576
6563 } 6577 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
6578 DRM_DEBUG("IH: D4 vline\n");
6579
6564 break; 6580 break;
6565 default: 6581 default:
6566 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6582 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
6570 case 5: /* D5 vblank/vline */ 6586 case 5: /* D5 vblank/vline */
6571 switch (src_data) { 6587 switch (src_data) {
6572 case 0: /* D5 vblank */ 6588 case 0: /* D5 vblank */
6573 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 6589 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
6574 if (rdev->irq.crtc_vblank_int[4]) { 6590 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6575 drm_handle_vblank(rdev->ddev, 4); 6591
6576 rdev->pm.vblank_sync = true; 6592 if (rdev->irq.crtc_vblank_int[4]) {
6577 wake_up(&rdev->irq.vblank_queue); 6593 drm_handle_vblank(rdev->ddev, 4);
6578 } 6594 rdev->pm.vblank_sync = true;
6579 if (atomic_read(&rdev->irq.pflip[4])) 6595 wake_up(&rdev->irq.vblank_queue);
6580 radeon_crtc_handle_vblank(rdev, 4);
6581 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6582 DRM_DEBUG("IH: D5 vblank\n");
6583 } 6596 }
6597 if (atomic_read(&rdev->irq.pflip[4]))
6598 radeon_crtc_handle_vblank(rdev, 4);
6599 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6600 DRM_DEBUG("IH: D5 vblank\n");
6601
6584 break; 6602 break;
6585 case 1: /* D5 vline */ 6603 case 1: /* D5 vline */
6586 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 6604 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
6587 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 6605 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6588 DRM_DEBUG("IH: D5 vline\n"); 6606
6589 } 6607 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
6608 DRM_DEBUG("IH: D5 vline\n");
6609
6590 break; 6610 break;
6591 default: 6611 default:
6592 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6612 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
6596 case 6: /* D6 vblank/vline */ 6616 case 6: /* D6 vblank/vline */
6597 switch (src_data) { 6617 switch (src_data) {
6598 case 0: /* D6 vblank */ 6618 case 0: /* D6 vblank */
6599 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 6619 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
6600 if (rdev->irq.crtc_vblank_int[5]) { 6620 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6601 drm_handle_vblank(rdev->ddev, 5); 6621
6602 rdev->pm.vblank_sync = true; 6622 if (rdev->irq.crtc_vblank_int[5]) {
6603 wake_up(&rdev->irq.vblank_queue); 6623 drm_handle_vblank(rdev->ddev, 5);
6604 } 6624 rdev->pm.vblank_sync = true;
6605 if (atomic_read(&rdev->irq.pflip[5])) 6625 wake_up(&rdev->irq.vblank_queue);
6606 radeon_crtc_handle_vblank(rdev, 5);
6607 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6608 DRM_DEBUG("IH: D6 vblank\n");
6609 } 6626 }
6627 if (atomic_read(&rdev->irq.pflip[5]))
6628 radeon_crtc_handle_vblank(rdev, 5);
6629 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6630 DRM_DEBUG("IH: D6 vblank\n");
6631
6610 break; 6632 break;
6611 case 1: /* D6 vline */ 6633 case 1: /* D6 vline */
6612 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 6634 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
6613 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 6635 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6614 DRM_DEBUG("IH: D6 vline\n"); 6636
6615 } 6637 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
6638 DRM_DEBUG("IH: D6 vline\n");
6639
6616 break; 6640 break;
6617 default: 6641 default:
6618 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6642 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
6632 case 42: /* HPD hotplug */ 6656 case 42: /* HPD hotplug */
6633 switch (src_data) { 6657 switch (src_data) {
6634 case 0: 6658 case 0:
6635 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 6659 if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
6636 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; 6660 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6637 queue_hotplug = true; 6661
6638 DRM_DEBUG("IH: HPD1\n"); 6662 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
6639 } 6663 queue_hotplug = true;
6664 DRM_DEBUG("IH: HPD1\n");
6665
6640 break; 6666 break;
6641 case 1: 6667 case 1:
6642 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 6668 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
6643 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; 6669 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6644 queue_hotplug = true; 6670
6645 DRM_DEBUG("IH: HPD2\n"); 6671 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
6646 } 6672 queue_hotplug = true;
6673 DRM_DEBUG("IH: HPD2\n");
6674
6647 break; 6675 break;
6648 case 2: 6676 case 2:
6649 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 6677 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
6650 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 6678 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6651 queue_hotplug = true; 6679
6652 DRM_DEBUG("IH: HPD3\n"); 6680 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
6653 } 6681 queue_hotplug = true;
6682 DRM_DEBUG("IH: HPD3\n");
6683
6654 break; 6684 break;
6655 case 3: 6685 case 3:
6656 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 6686 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
6657 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 6687 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6658 queue_hotplug = true; 6688
6659 DRM_DEBUG("IH: HPD4\n"); 6689 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
6660 } 6690 queue_hotplug = true;
6691 DRM_DEBUG("IH: HPD4\n");
6692
6661 break; 6693 break;
6662 case 4: 6694 case 4:
6663 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 6695 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
6664 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 6696 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6665 queue_hotplug = true; 6697
6666 DRM_DEBUG("IH: HPD5\n"); 6698 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
6667 } 6699 queue_hotplug = true;
6700 DRM_DEBUG("IH: HPD5\n");
6701
6668 break; 6702 break;
6669 case 5: 6703 case 5:
6670 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 6704 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
6671 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 6705 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6672 queue_hotplug = true; 6706
6673 DRM_DEBUG("IH: HPD6\n"); 6707 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
6674 } 6708 queue_hotplug = true;
6709 DRM_DEBUG("IH: HPD6\n");
6710
6675 break; 6711 break;
6676 case 6: 6712 case 6:
6677 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { 6713 if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
6678 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; 6714 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6679 queue_dp = true; 6715
6680 DRM_DEBUG("IH: HPD_RX 1\n"); 6716 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
6681 } 6717 queue_dp = true;
6718 DRM_DEBUG("IH: HPD_RX 1\n");
6719
6682 break; 6720 break;
6683 case 7: 6721 case 7:
6684 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { 6722 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
6685 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; 6723 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6686 queue_dp = true; 6724
6687 DRM_DEBUG("IH: HPD_RX 2\n"); 6725 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
6688 } 6726 queue_dp = true;
6727 DRM_DEBUG("IH: HPD_RX 2\n");
6728
6689 break; 6729 break;
6690 case 8: 6730 case 8:
6691 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { 6731 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
6692 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; 6732 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6693 queue_dp = true; 6733
6694 DRM_DEBUG("IH: HPD_RX 3\n"); 6734 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
6695 } 6735 queue_dp = true;
6736 DRM_DEBUG("IH: HPD_RX 3\n");
6737
6696 break; 6738 break;
6697 case 9: 6739 case 9:
6698 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { 6740 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
6699 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; 6741 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6700 queue_dp = true; 6742
6701 DRM_DEBUG("IH: HPD_RX 4\n"); 6743 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
6702 } 6744 queue_dp = true;
6745 DRM_DEBUG("IH: HPD_RX 4\n");
6746
6703 break; 6747 break;
6704 case 10: 6748 case 10:
6705 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { 6749 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
6706 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; 6750 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6707 queue_dp = true; 6751
6708 DRM_DEBUG("IH: HPD_RX 5\n"); 6752 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
6709 } 6753 queue_dp = true;
6754 DRM_DEBUG("IH: HPD_RX 5\n");
6755
6710 break; 6756 break;
6711 case 11: 6757 case 11:
6712 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 6758 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
6713 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; 6759 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6714 queue_dp = true; 6760
6715 DRM_DEBUG("IH: HPD_RX 6\n"); 6761 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
6716 } 6762 queue_dp = true;
6763 DRM_DEBUG("IH: HPD_RX 6\n");
6764
6717 break; 6765 break;
6718 default: 6766 default:
6719 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6767 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 1dbdf3230dae..787cd8fd897f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ 2926 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, 2927 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, 2928 { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
2929 { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
2929 { 0, 0, 0, 0 }, 2930 { 0, 0, 0, 0 },
2930}; 2931};
2931 2932
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 01b558fe3695..9a0c2911272a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
555 .probe = rockchip_drm_platform_probe, 555 .probe = rockchip_drm_platform_probe,
556 .remove = rockchip_drm_platform_remove, 556 .remove = rockchip_drm_platform_remove,
557 .driver = { 557 .driver = {
558 .owner = THIS_MODULE,
559 .name = "rockchip-drm", 558 .name = "rockchip-drm",
560 .of_match_table = rockchip_drm_dt_ids, 559 .of_match_table = rockchip_drm_dt_ids,
561 .pm = &rockchip_drm_pm_ops, 560 .pm = &rockchip_drm_pm_ops,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 77d52893d40f..002645bb5bbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
162 struct rockchip_drm_private *private = dev->dev_private; 162 struct rockchip_drm_private *private = dev->dev_private;
163 struct drm_fb_helper *fb_helper = &private->fbdev_helper; 163 struct drm_fb_helper *fb_helper = &private->fbdev_helper;
164 164
165 drm_fb_helper_hotplug_event(fb_helper); 165 if (fb_helper)
166 drm_fb_helper_hotplug_event(fb_helper);
166} 167}
167 168
168static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 169static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index eb2282cc4a56..eba5f8a52fbd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
54 &rk_obj->dma_attrs); 54 &rk_obj->dma_attrs);
55} 55}
56 56
57int rockchip_gem_mmap_buf(struct drm_gem_object *obj, 57static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
58 struct vm_area_struct *vma) 58 struct vm_area_struct *vma)
59
59{ 60{
61 int ret;
60 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 62 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
61 struct drm_device *drm = obj->dev; 63 struct drm_device *drm = obj->dev;
62 unsigned long vm_size;
63 64
64 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 65 /*
65 vm_size = vma->vm_end - vma->vm_start; 66 * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
66 67 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
67 if (vm_size > obj->size) 68 */
68 return -EINVAL; 69 vma->vm_flags &= ~VM_PFNMAP;
69 70
70 return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 71 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
71 obj->size, &rk_obj->dma_attrs); 72 obj->size, &rk_obj->dma_attrs);
73 if (ret)
74 drm_gem_vm_close(vma);
75
76 return ret;
72} 77}
73 78
74/* drm driver mmap file operations */ 79int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
75int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) 80 struct vm_area_struct *vma)
76{ 81{
77 struct drm_file *priv = filp->private_data; 82 struct drm_device *drm = obj->dev;
78 struct drm_device *dev = priv->minor->dev;
79 struct drm_gem_object *obj;
80 struct drm_vma_offset_node *node;
81 int ret; 83 int ret;
82 84
83 if (drm_device_is_unplugged(dev)) 85 mutex_lock(&drm->struct_mutex);
84 return -ENODEV; 86 ret = drm_gem_mmap_obj(obj, obj->size, vma);
87 mutex_unlock(&drm->struct_mutex);
88 if (ret)
89 return ret;
85 90
86 mutex_lock(&dev->struct_mutex); 91 return rockchip_drm_gem_object_mmap(obj, vma);
92}
87 93
88 node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, 94/* drm driver mmap file operations */
89 vma->vm_pgoff, 95int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
90 vma_pages(vma)); 96{
91 if (!node) { 97 struct drm_gem_object *obj;
92 mutex_unlock(&dev->struct_mutex); 98 int ret;
93 DRM_ERROR("failed to find vma node.\n");
94 return -EINVAL;
95 } else if (!drm_vma_node_is_allowed(node, filp)) {
96 mutex_unlock(&dev->struct_mutex);
97 return -EACCES;
98 }
99 99
100 obj = container_of(node, struct drm_gem_object, vma_node); 100 ret = drm_gem_mmap(filp, vma);
101 ret = rockchip_gem_mmap_buf(obj, vma); 101 if (ret)
102 return ret;
102 103
103 mutex_unlock(&dev->struct_mutex); 104 obj = vma->vm_private_data;
104 105
105 return ret; 106 return rockchip_drm_gem_object_mmap(obj, vma);
106} 107}
107 108
108struct rockchip_gem_object * 109struct rockchip_gem_object *
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index dc65161d7cad..34b78e736532 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -170,6 +170,7 @@ struct vop_win_phy {
170 170
171 struct vop_reg enable; 171 struct vop_reg enable;
172 struct vop_reg format; 172 struct vop_reg format;
173 struct vop_reg rb_swap;
173 struct vop_reg act_info; 174 struct vop_reg act_info;
174 struct vop_reg dsp_info; 175 struct vop_reg dsp_info;
175 struct vop_reg dsp_st; 176 struct vop_reg dsp_st;
@@ -199,8 +200,12 @@ struct vop_data {
199static const uint32_t formats_01[] = { 200static const uint32_t formats_01[] = {
200 DRM_FORMAT_XRGB8888, 201 DRM_FORMAT_XRGB8888,
201 DRM_FORMAT_ARGB8888, 202 DRM_FORMAT_ARGB8888,
203 DRM_FORMAT_XBGR8888,
204 DRM_FORMAT_ABGR8888,
202 DRM_FORMAT_RGB888, 205 DRM_FORMAT_RGB888,
206 DRM_FORMAT_BGR888,
203 DRM_FORMAT_RGB565, 207 DRM_FORMAT_RGB565,
208 DRM_FORMAT_BGR565,
204 DRM_FORMAT_NV12, 209 DRM_FORMAT_NV12,
205 DRM_FORMAT_NV16, 210 DRM_FORMAT_NV16,
206 DRM_FORMAT_NV24, 211 DRM_FORMAT_NV24,
@@ -209,8 +214,12 @@ static const uint32_t formats_01[] = {
209static const uint32_t formats_234[] = { 214static const uint32_t formats_234[] = {
210 DRM_FORMAT_XRGB8888, 215 DRM_FORMAT_XRGB8888,
211 DRM_FORMAT_ARGB8888, 216 DRM_FORMAT_ARGB8888,
217 DRM_FORMAT_XBGR8888,
218 DRM_FORMAT_ABGR8888,
212 DRM_FORMAT_RGB888, 219 DRM_FORMAT_RGB888,
220 DRM_FORMAT_BGR888,
213 DRM_FORMAT_RGB565, 221 DRM_FORMAT_RGB565,
222 DRM_FORMAT_BGR565,
214}; 223};
215 224
216static const struct vop_win_phy win01_data = { 225static const struct vop_win_phy win01_data = {
@@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = {
218 .nformats = ARRAY_SIZE(formats_01), 227 .nformats = ARRAY_SIZE(formats_01),
219 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0), 228 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
220 .format = VOP_REG(WIN0_CTRL0, 0x7, 1), 229 .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
230 .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
221 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0), 231 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
222 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0), 232 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
223 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0), 233 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = {
234 .nformats = ARRAY_SIZE(formats_234), 244 .nformats = ARRAY_SIZE(formats_234),
235 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0), 245 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
236 .format = VOP_REG(WIN2_CTRL0, 0x7, 1), 246 .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
247 .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
237 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0), 248 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
238 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0), 249 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
239 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0), 250 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = {
242 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0), 253 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
243}; 254};
244 255
245static const struct vop_win_phy cursor_data = {
246 .data_formats = formats_234,
247 .nformats = ARRAY_SIZE(formats_234),
248 .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
249 .format = VOP_REG(HWC_CTRL0, 0x7, 1),
250 .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
251 .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
252};
253
254static const struct vop_ctrl ctrl_data = { 256static const struct vop_ctrl ctrl_data = {
255 .standby = VOP_REG(SYS_CTRL, 0x1, 22), 257 .standby = VOP_REG(SYS_CTRL, 0x1, 22),
256 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23), 258 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = {
282/* 284/*
283 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires 285 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
284 * special support to get alpha blending working. For now, just use overlay 286 * special support to get alpha blending working. For now, just use overlay
285 * window 1 for the drm cursor. 287 * window 3 for the drm cursor.
288 *
286 */ 289 */
287static const struct vop_win_data rk3288_vop_win_data[] = { 290static const struct vop_win_data rk3288_vop_win_data[] = {
288 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY }, 291 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
289 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR }, 292 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
290 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, 293 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
291 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY }, 294 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
292 { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
293}; 295};
294 296
295static const struct vop_data rk3288_vop = { 297static const struct vop_data rk3288_vop = {
@@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
352 } 354 }
353} 355}
354 356
357static bool has_rb_swapped(uint32_t format)
358{
359 switch (format) {
360 case DRM_FORMAT_XBGR8888:
361 case DRM_FORMAT_ABGR8888:
362 case DRM_FORMAT_BGR888:
363 case DRM_FORMAT_BGR565:
364 return true;
365 default:
366 return false;
367 }
368}
369
355static enum vop_data_format vop_convert_format(uint32_t format) 370static enum vop_data_format vop_convert_format(uint32_t format)
356{ 371{
357 switch (format) { 372 switch (format) {
358 case DRM_FORMAT_XRGB8888: 373 case DRM_FORMAT_XRGB8888:
359 case DRM_FORMAT_ARGB8888: 374 case DRM_FORMAT_ARGB8888:
375 case DRM_FORMAT_XBGR8888:
376 case DRM_FORMAT_ABGR8888:
360 return VOP_FMT_ARGB8888; 377 return VOP_FMT_ARGB8888;
361 case DRM_FORMAT_RGB888: 378 case DRM_FORMAT_RGB888:
379 case DRM_FORMAT_BGR888:
362 return VOP_FMT_RGB888; 380 return VOP_FMT_RGB888;
363 case DRM_FORMAT_RGB565: 381 case DRM_FORMAT_RGB565:
382 case DRM_FORMAT_BGR565:
364 return VOP_FMT_RGB565; 383 return VOP_FMT_RGB565;
365 case DRM_FORMAT_NV12: 384 case DRM_FORMAT_NV12:
366 return VOP_FMT_YUV420SP; 385 return VOP_FMT_YUV420SP;
@@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format)
378{ 397{
379 switch (format) { 398 switch (format) {
380 case DRM_FORMAT_ARGB8888: 399 case DRM_FORMAT_ARGB8888:
400 case DRM_FORMAT_ABGR8888:
381 return true; 401 return true;
382 default: 402 default:
383 return false; 403 return false;
@@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
588 enum vop_data_format format; 608 enum vop_data_format format;
589 uint32_t val; 609 uint32_t val;
590 bool is_alpha; 610 bool is_alpha;
611 bool rb_swap;
591 bool visible; 612 bool visible;
592 int ret; 613 int ret;
593 struct drm_rect dest = { 614 struct drm_rect dest = {
@@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
621 return 0; 642 return 0;
622 643
623 is_alpha = is_alpha_support(fb->pixel_format); 644 is_alpha = is_alpha_support(fb->pixel_format);
645 rb_swap = has_rb_swapped(fb->pixel_format);
624 format = vop_convert_format(fb->pixel_format); 646 format = vop_convert_format(fb->pixel_format);
625 if (format < 0) 647 if (format < 0)
626 return format; 648 return format;
@@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
689 val = (dsp_sty - 1) << 16; 711 val = (dsp_sty - 1) << 16;
690 val |= (dsp_stx - 1) & 0xffff; 712 val |= (dsp_stx - 1) & 0xffff;
691 VOP_WIN_SET(vop, win, dsp_st, val); 713 VOP_WIN_SET(vop, win, dsp_st, val);
714 VOP_WIN_SET(vop, win, rb_swap, rb_swap);
692 715
693 if (is_alpha) { 716 if (is_alpha) {
694 VOP_WIN_SET(vop, win, dst_alpha_ctl, 717 VOP_WIN_SET(vop, win, dst_alpha_ctl,
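The BGR fourccs added above reuse the same VOP hardware formats as their RGB twins; the only hardware difference is the rb_swap bit in WINx_CTRL0, which is why vop_convert_format() maps both variants to one VOP_FMT_* value while has_rb_swapped() decides the extra register write. A small out-of-tree sketch of how the two lookups combine (the fourcc and VOP_FMT values are placeholders so the sketch builds without kernel headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder codes standing in for drm_fourcc.h and the VOP formats. */
enum { XRGB8888 = 1, XBGR8888, RGB565, BGR565 };
enum vop_fmt { VOP_FMT_ARGB8888, VOP_FMT_RGB565 };

static bool has_rb_swapped(uint32_t format)
{
        return format == XBGR8888 || format == BGR565;
}

static enum vop_fmt vop_convert_format(uint32_t format)
{
        return (format == XRGB8888 || format == XBGR8888) ?
                VOP_FMT_ARGB8888 : VOP_FMT_RGB565;
}

int main(void)
{
        uint32_t fb_format = XBGR8888;

        /* XBGR8888 programs the same format field as XRGB8888, plus rb_swap. */
        printf("fmt=%d rb_swap=%d\n",
               vop_convert_format(fb_format), has_rb_swapped(fb_format));
        return 0;
}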
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
490 else if (boot_cpu_data.x86 > 3) 490 else if (boot_cpu_data.x86 > 3)
491 tmp = pgprot_noncached(tmp); 491 tmp = pgprot_noncached(tmp);
492#endif 492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) 493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494 defined(__powerpc__)
494 if (caching_flags & TTM_PL_FLAG_WC) 495 if (caching_flags & TTM_PL_FLAG_WC)
495 tmp = pgprot_writecombine(tmp); 496 tmp = pgprot_writecombine(tmp);
496 else 497 else
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3077f1554099..624d941aaad1 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
963 } else { 963 } else {
964 pool->npages_free += count; 964 pool->npages_free += count;
965 list_splice(&ttm_dma->pages_list, &pool->free_list); 965 list_splice(&ttm_dma->pages_list, &pool->free_list);
966 npages = count; 966 /*
 967 if (pool->npages_free > _manager->options.max_size) { 967 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
968 * to free in order to minimize calls to set_memory_wb().
969 */
970 if (pool->npages_free >= (_manager->options.max_size +
971 NUM_PAGES_TO_ALLOC))
968 npages = pool->npages_free - _manager->options.max_size; 972 npages = pool->npages_free - _manager->options.max_size;
969 /* free at least NUM_PAGES_TO_ALLOC number of pages
970 * to reduce calls to set_memory_wb */
971 if (npages < NUM_PAGES_TO_ALLOC)
972 npages = NUM_PAGES_TO_ALLOC;
973 }
974 } 973 }
975 spin_unlock_irqrestore(&pool->lock, irq_flags); 974 spin_unlock_irqrestore(&pool->lock, irq_flags);
976 975
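The rewritten branch above only starts trimming the pool once the free list is at least NUM_PAGES_TO_ALLOC pages above the configured maximum, so each pass that re-enables write-back caching covers a worthwhile batch instead of a handful of pages. A worked example with assumed numbers (the real limits come from the ttm pool manager options):

#include <stdio.h>

#define NUM_PAGES_TO_ALLOC 64   /* assumed batch size, for illustration */

int main(void)
{
        unsigned long max_size = 256;     /* assumed pool size limit */
        unsigned long npages_free = 300;  /* pages currently on the free list */
        unsigned long npages = 0;         /* pages that will be released */

        /* Mirrors the new condition: trim only when a full batch sits
         * above the limit, then release the whole overshoot at once. */
        if (npages_free >= max_size + NUM_PAGES_TO_ALLOC)
                npages = npages_free - max_size;

        /* 300 < 256 + 64, so nothing is freed yet; at 320 or more, at
         * least 64 pages would be released in one go. */
        printf("npages to free: %lu\n", npages);
        return 0;
}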
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2493 true, NULL); 2493 true, NULL);
2494 if (unlikely(ret != 0)) 2494 if (unlikely(ret != 0))
2495 goto out_err; 2495 goto out_err_nores;
2496 2496
2497 ret = vmw_validate_buffers(dev_priv, sw_context); 2497 ret = vmw_validate_buffers(dev_priv, sw_context);
2498 if (unlikely(ret != 0)) 2498 if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2536 vmw_resource_relocations_free(&sw_context->res_relocations); 2536 vmw_resource_relocations_free(&sw_context->res_relocations);
2537 2537
2538 vmw_fifo_commit(dev_priv, command_size); 2538 vmw_fifo_commit(dev_priv, command_size);
2539 mutex_unlock(&dev_priv->binding_mutex);
2539 2540
2540 vmw_query_bo_switch_commit(dev_priv, sw_context); 2541 vmw_query_bo_switch_commit(dev_priv, sw_context);
2541 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, 2542 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2551 DRM_ERROR("Fence submission error. Syncing.\n"); 2552 DRM_ERROR("Fence submission error. Syncing.\n");
2552 2553
2553 vmw_resource_list_unreserve(&sw_context->resource_list, false); 2554 vmw_resource_list_unreserve(&sw_context->resource_list, false);
2554 mutex_unlock(&dev_priv->binding_mutex);
2555 2555
2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2557 (void *) fence); 2557 (void *) fence);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 6d2f39d36e44..00f2058944e5 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
1107 return ret; 1107 return ret;
1108 } 1108 }
1109 1109
1110 for (i = 0; i < IPU_NUM_IRQS; i += 32)
1111 ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
1112
1110 for (i = 0; i < IPU_NUM_IRQS; i += 32) { 1113 for (i = 0; i < IPU_NUM_IRQS; i += 32) {
1111 gc = irq_get_domain_generic_chip(ipu->domain, i); 1114 gc = irq_get_domain_generic_chip(ipu->domain, i);
1112 gc->reg_base = ipu->cm_reg; 1115 gc->reg_base = ipu->cm_reg;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
550 .driver_data = APPLE_HAS_FN },
551 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
552 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
553 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
554 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), 555 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
550 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 556 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
551 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), 557 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..e6fce23b121a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
1785 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
1786 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
1787 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
1785 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1788 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1786 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1789 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1787 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1790 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
2463 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
2464 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
2465 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 2468 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
2469 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
2470 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
2471 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 2472 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 2473 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
2468 { } 2474 { }
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..a2dbbbe0d8d7 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
356 struct cp2112_force_read_report report; 356 struct cp2112_force_read_report report;
357 int ret; 357 int ret;
358 358
359 if (size > sizeof(dev->read_data))
360 size = sizeof(dev->read_data);
359 report.report = CP2112_DATA_READ_FORCE_SEND; 361 report.report = CP2112_DATA_READ_FORCE_SEND;
360 report.length = cpu_to_be16(size); 362 report.length = cpu_to_be16(size);
361 363
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..b3b225b75d0a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
145#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
146#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
147#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
145#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 148#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
146#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 149#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
147#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 150#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..e3c63640df73 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
462 462
463static void hidinput_cleanup_battery(struct hid_device *dev) 463static void hidinput_cleanup_battery(struct hid_device *dev)
464{ 464{
465 const struct power_supply_desc *psy_desc;
466
465 if (!dev->battery) 467 if (!dev->battery)
466 return; 468 return;
467 469
470 psy_desc = dev->battery->desc;
468 power_supply_unregister(dev->battery); 471 power_supply_unregister(dev->battery);
469 kfree(dev->battery->desc->name); 472 kfree(psy_desc->name);
470 kfree(dev->battery->desc); 473 kfree(psy_desc);
471 dev->battery = NULL; 474 dev->battery = NULL;
472} 475}
473#else /* !CONFIG_HID_BATTERY_STRENGTH */ 476#else /* !CONFIG_HID_BATTERY_STRENGTH */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..7c811252c1ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
778 /* 778 /*
779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" 779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
780 * for the stylus. 780 * for the stylus.
781 * The check for mt_report_id ensures we don't process
782 * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
783 * collection, but within the report ID.
781 */ 784 */
782 if (field->physical == HID_DG_STYLUS) 785 if (field->physical == HID_DG_STYLUS)
783 return 0; 786 return 0;
787 else if ((field->physical == 0) &&
788 (field->report->id != td->mt_report_id) &&
789 (td->mt_report_id != -1))
790 return 0;
784 791
785 if (field->application == HID_DG_TOUCHSCREEN || 792 if (field->application == HID_DG_TOUCHSCREEN ||
786 field->application == HID_DG_TOUCHPAD) 793 field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
858 for (p = drvdata->rdesc; 858 for (p = drvdata->rdesc;
859 p <= drvdata->rdesc + drvdata->rsize - 4;) { 859 p <= drvdata->rdesc + drvdata->rsize - 4;) {
860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && 860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
861 p[3] < sizeof(params)) { 861 p[3] < ARRAY_SIZE(params)) {
862 v = params[p[3]]; 862 v = params[p[3]];
863 put_unaligned(cpu_to_le32(v), (s32 *)p); 863 put_unaligned(cpu_to_le32(v), (s32 *)p);
864 p += 4; 864 p += 4;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..20f9a653444c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 93 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 94 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, 95 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..01b937e63cf3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1271,17 +1271,52 @@ fail_leds:
1271 pad_input_dev = NULL; 1271 pad_input_dev = NULL;
1272 wacom_wac->pad_registered = false; 1272 wacom_wac->pad_registered = false;
1273fail_register_pad_input: 1273fail_register_pad_input:
1274 input_unregister_device(touch_input_dev); 1274 if (touch_input_dev)
1275 input_unregister_device(touch_input_dev);
1275 wacom_wac->touch_input = NULL; 1276 wacom_wac->touch_input = NULL;
1276 wacom_wac->touch_registered = false; 1277 wacom_wac->touch_registered = false;
1277fail_register_touch_input: 1278fail_register_touch_input:
1278 input_unregister_device(pen_input_dev); 1279 if (pen_input_dev)
1280 input_unregister_device(pen_input_dev);
1279 wacom_wac->pen_input = NULL; 1281 wacom_wac->pen_input = NULL;
1280 wacom_wac->pen_registered = false; 1282 wacom_wac->pen_registered = false;
1281fail_register_pen_input: 1283fail_register_pen_input:
1282 return error; 1284 return error;
1283} 1285}
1284 1286
1287/*
1288 * Not all devices report physical dimensions from HID.
1289 * Compute the default from hardcoded logical dimension
1290 * and resolution before driver overwrites them.
1291 */
1292static void wacom_set_default_phy(struct wacom_features *features)
1293{
1294 if (features->x_resolution) {
1295 features->x_phy = (features->x_max * 100) /
1296 features->x_resolution;
1297 features->y_phy = (features->y_max * 100) /
1298 features->y_resolution;
1299 }
1300}
1301
1302static void wacom_calculate_res(struct wacom_features *features)
1303{
1304 /* set unit to "100th of a mm" for devices not reported by HID */
1305 if (!features->unit) {
1306 features->unit = 0x11;
1307 features->unitExpo = -3;
1308 }
1309
1310 features->x_resolution = wacom_calc_hid_res(features->x_max,
1311 features->x_phy,
1312 features->unit,
1313 features->unitExpo);
1314 features->y_resolution = wacom_calc_hid_res(features->y_max,
1315 features->y_phy,
1316 features->unit,
1317 features->unitExpo);
1318}
1319
1285static void wacom_wireless_work(struct work_struct *work) 1320static void wacom_wireless_work(struct work_struct *work)
1286{ 1321{
1287 struct wacom *wacom = container_of(work, struct wacom, work); 1322 struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1339,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
1339 if (wacom_wac1->features.type != INTUOSHT && 1374 if (wacom_wac1->features.type != INTUOSHT &&
1340 wacom_wac1->features.type != BAMBOO_PT) 1375 wacom_wac1->features.type != BAMBOO_PT)
1341 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; 1376 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
1377 wacom_set_default_phy(&wacom_wac1->features);
1378 wacom_calculate_res(&wacom_wac1->features);
1342 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", 1379 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
1343 wacom_wac1->features.name); 1380 wacom_wac1->features.name);
1344 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", 1381 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1357,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
1357 wacom_wac2->features = 1394 wacom_wac2->features =
1358 *((struct wacom_features *)id->driver_data); 1395 *((struct wacom_features *)id->driver_data);
1359 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; 1396 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
1397 wacom_set_default_phy(&wacom_wac2->features);
1360 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; 1398 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
1399 wacom_calculate_res(&wacom_wac2->features);
1361 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, 1400 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
1362 "%s (WL) Finger",wacom_wac2->features.name); 1401 "%s (WL) Finger",wacom_wac2->features.name);
1363 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, 1402 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1405,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
1405 } 1444 }
1406} 1445}
1407 1446
1408/*
1409 * Not all devices report physical dimensions from HID.
1410 * Compute the default from hardcoded logical dimension
1411 * and resolution before driver overwrites them.
1412 */
1413static void wacom_set_default_phy(struct wacom_features *features)
1414{
1415 if (features->x_resolution) {
1416 features->x_phy = (features->x_max * 100) /
1417 features->x_resolution;
1418 features->y_phy = (features->y_max * 100) /
1419 features->y_resolution;
1420 }
1421}
1422
1423static void wacom_calculate_res(struct wacom_features *features)
1424{
1425 /* set unit to "100th of a mm" for devices not reported by HID */
1426 if (!features->unit) {
1427 features->unit = 0x11;
1428 features->unitExpo = -3;
1429 }
1430
1431 features->x_resolution = wacom_calc_hid_res(features->x_max,
1432 features->x_phy,
1433 features->unit,
1434 features->unitExpo);
1435 features->y_resolution = wacom_calc_hid_res(features->y_max,
1436 features->y_phy,
1437 features->unit,
1438 features->unitExpo);
1439}
1440
1441static size_t wacom_compute_pktlen(struct hid_device *hdev) 1447static size_t wacom_compute_pktlen(struct hid_device *hdev)
1442{ 1448{
1443 struct hid_report_enum *report_enum; 1449 struct hid_report_enum *report_enum;
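wacom_set_default_phy() and wacom_calculate_res() are only moved earlier in the file so the wireless path can call them, but the arithmetic is worth spelling out: with the unit fixed at hundredths of a millimetre (unit 0x11, exponent -3), the physical extent is recovered from the logical maximum and a resolution assumed to be in points per millimetre. A hedged numeric sketch of the x-axis computation, using made-up tablet values:

#include <stdio.h>

int main(void)
{
        /* Made-up tablet values, for illustration only. */
        int x_max = 21648;      /* logical maximum reported on ABS_X */
        int x_resolution = 100; /* assumed points per millimetre */
        int x_phy;

        /* Same computation as wacom_set_default_phy(): the physical
         * extent in hundredths of a millimetre (unit 0x11, exp -3). */
        x_phy = (x_max * 100) / x_resolution;

        printf("x_phy = %d (about %d mm)\n", x_phy, x_phy / 100);
        return 0;
}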
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0d244239e55d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2213 features->x_max = 4096; 2213 features->x_max = 4096;
2214 features->y_max = 4096; 2214 features->y_max = 4096;
2215 } 2215 }
2216 else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
2217 features->device_type |= WACOM_DEVICETYPE_PAD;
2218 }
2216 } 2219 }
2217 2220
2218 /* 2221 /*
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
929 929
930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); 930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
931 931
932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
933 {
934 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8100
936 * for unknown reasons.
937 */
938 .ident = "Dell Studio XPS 8100",
939 .matches = {
940 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
941 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
942 },
943 },
944 { }
945};
946
932/* 947/*
933 * Probe for the presence of a supported laptop. 948 * Probe for the presence of a supported laptop.
934 */ 949 */
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
940 /* 955 /*
941 * Get DMI information 956 * Get DMI information
942 */ 957 */
943 if (!dmi_check_system(i8k_dmi_table)) { 958 if (!dmi_check_system(i8k_dmi_table) ||
959 dmi_check_system(i8k_blacklist_dmi_table)) {
944 if (!ignore_dmi && !force) 960 if (!ignore_dmi && !force)
945 return -ENODEV; 961 return -ENODEV;
946 962
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..85d106fe3ce8 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
582 { .compatible = "gmt,g763" }, 582 { .compatible = "gmt,g763" },
583 { }, 583 { },
584}; 584};
585MODULE_DEVICE_TABLE(of, g762_dt_match);
585 586
586/* 587/*
587 * Grab clock (a required property), enable it, get (fixed) clock frequency 588 * Grab clock (a required property), enable it, get (fixed) clock frequency
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..fbfc02bb2cfa 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -195,7 +195,7 @@ abort:
195} 195}
196 196
197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, 197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
198 unsigned int voltage) 198 unsigned long voltage)
199{ 199{
200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; 200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
201 int err; 201 int err;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
412 return sprintf(buf, "%d\n", val); 412 return sprintf(buf, "%d\n", val);
413} 413}
414 414
415static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, 415static ssize_t store_enable(struct device *dev,
416 const char *buf, size_t count) 416 struct device_attribute *devattr,
417 const char *buf, size_t count)
417{ 418{
418 int index = to_sensor_dev_attr(devattr)->index; 419 int index = to_sensor_dev_attr(devattr)->index;
419 struct nct7904_data *data = dev_get_drvdata(dev); 420 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
422 423
423 if (kstrtoul(buf, 10, &val) < 0) 424 if (kstrtoul(buf, 10, &val) < 0)
424 return -EINVAL; 425 return -EINVAL;
425 if (val > 1 || (val && !data->fan_mode[index])) 426 if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
426 return -EINVAL; 427 return -EINVAL;
427 428
428 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, 429 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
429 val ? data->fan_mode[index] : 0); 430 val == 2 ? data->fan_mode[index] : 0);
430 431
431 return ret ? ret : count; 432 return ret ? ret : count;
432} 433}
433 434
434/* Return 0 for manual mode or 1 for SmartFan mode */ 435/* Return 1 for manual mode or 2 for SmartFan mode */
435static ssize_t show_mode(struct device *dev, 436static ssize_t show_enable(struct device *dev,
436 struct device_attribute *devattr, char *buf) 437 struct device_attribute *devattr, char *buf)
437{ 438{
438 int index = to_sensor_dev_attr(devattr)->index; 439 int index = to_sensor_dev_attr(devattr)->index;
439 struct nct7904_data *data = dev_get_drvdata(dev); 440 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
443 if (val < 0) 444 if (val < 0)
444 return val; 445 return val;
445 446
446 return sprintf(buf, "%d\n", val ? 1 : 0); 447 return sprintf(buf, "%d\n", val ? 2 : 1);
447} 448}
448 449
449/* 2 attributes per channel: pwm and mode */ 450/* 2 attributes per channel: pwm and mode */
450static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 451static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
451 show_pwm, store_pwm, 0); 452 show_pwm, store_pwm, 0);
452static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, 453static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
453 show_mode, store_mode, 0); 454 show_enable, store_enable, 0);
454static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, 455static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
455 show_pwm, store_pwm, 1); 456 show_pwm, store_pwm, 1);
456static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, 457static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
457 show_mode, store_mode, 1); 458 show_enable, store_enable, 1);
458static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, 459static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
459 show_pwm, store_pwm, 2); 460 show_pwm, store_pwm, 2);
460static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, 461static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
461 show_mode, store_mode, 2); 462 show_enable, store_enable, 2);
462static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, 463static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
463 show_pwm, store_pwm, 3); 464 show_pwm, store_pwm, 3);
464static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, 465static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
465 show_mode, store_mode, 3); 466 show_enable, store_enable, 3);
466 467
467static struct attribute *nct7904_fanctl_attrs[] = { 468static struct attribute *nct7904_fanctl_attrs[] = {
468 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 469 &sensor_dev_attr_pwm1.dev_attr.attr,
469 &sensor_dev_attr_fan1_mode.dev_attr.attr, 470 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
470 &sensor_dev_attr_fan2_pwm.dev_attr.attr, 471 &sensor_dev_attr_pwm2.dev_attr.attr,
471 &sensor_dev_attr_fan2_mode.dev_attr.attr, 472 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
472 &sensor_dev_attr_fan3_pwm.dev_attr.attr, 473 &sensor_dev_attr_pwm3.dev_attr.attr,
473 &sensor_dev_attr_fan3_mode.dev_attr.attr, 474 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
474 &sensor_dev_attr_fan4_pwm.dev_attr.attr, 475 &sensor_dev_attr_pwm4.dev_attr.attr,
475 &sensor_dev_attr_fan4_mode.dev_attr.attr, 476 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
476 NULL 477 NULL
477}; 478};
478 479
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
574 {"nct7904", 0}, 575 {"nct7904", 0},
575 {} 576 {}
576}; 577};
578MODULE_DEVICE_TABLE(i2c, nct7904_id);
577 579
578static struct i2c_driver nct7904_driver = { 580static struct i2c_driver nct7904_driver = {
579 .class = I2C_CLASS_HWMON, 581 .class = I2C_CLASS_HWMON,
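The renames above bring the driver in line with the standard hwmon sysfs ABI: pwmN holds the duty cycle and pwmN_enable selects the mode, where 1 means manual PWM control and 2 hands the channel to the chip's SmartFan automation. A minimal userspace sketch of switching a channel to SmartFan mode (the hwmon index and path are assumptions that depend on probe order):

#include <stdio.h>

int main(void)
{
        /* Hypothetical path; the hwmon number varies between systems. */
        const char *path = "/sys/class/hwmon/hwmon0/pwm1_enable";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* 1 = manual PWM control, 2 = SmartFan (automatic) mode. */
        fputs("2\n", f);
        fclose(f);
        return 0;
}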
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 35ac23768ce9..577d58d1f1a1 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -633,6 +633,7 @@ config I2C_MPC
633config I2C_MT65XX 633config I2C_MT65XX
634 tristate "MediaTek I2C adapter" 634 tristate "MediaTek I2C adapter"
635 depends on ARCH_MEDIATEK || COMPILE_TEST 635 depends on ARCH_MEDIATEK || COMPILE_TEST
636 depends on HAS_DMA
636 help 637 help
637 This selects the MediaTek(R) Integrated Inter Circuit bus driver 638 This selects the MediaTek(R) Integrated Inter Circuit bus driver
638 for MT65xx and MT81xx. 639 for MT65xx and MT81xx.
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
692 692
693 platform_set_drvdata(pdev, iface); 693 platform_set_drvdata(pdev, iface);
694 694
695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " 695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
696 "regs_base@%p\n", iface->regs_base); 696 "regs_base@%p\n", iface->regs_base);
697 697
698 return 0; 698 return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
735module_exit(i2c_bfin_twi_exit); 735module_exit(i2c_bfin_twi_exit);
736 736
737MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); 737MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); 738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
739MODULE_LICENSE("GPL"); 739MODULE_LICENSE("GPL");
740MODULE_ALIAS("platform:i2c-bfin-twi"); 740MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 19b2d689a5ef..f325663c27c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -764,12 +764,15 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
764 if (IS_ERR(i2c->clk)) 764 if (IS_ERR(i2c->clk))
765 return PTR_ERR(i2c->clk); 765 return PTR_ERR(i2c->clk);
766 766
767 clk_prepare_enable(i2c->clk); 767 ret = clk_prepare_enable(i2c->clk);
768 if (ret)
769 return ret;
768 770
769 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", 771 ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
770 &clk_freq)) { 772 &clk_freq);
773 if (ret) {
771 dev_err(&pdev->dev, "clock-frequency not specified in DT"); 774 dev_err(&pdev->dev, "clock-frequency not specified in DT");
772 return clk_freq; 775 goto err;
773 } 776 }
774 777
775 i2c->speed = clk_freq / 1000; 778 i2c->speed = clk_freq / 1000;
@@ -790,10 +793,8 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
790 i2c->irq = platform_get_irq(pdev, 0); 793 i2c->irq = platform_get_irq(pdev, 0);
791 ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0, 794 ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
792 dev_name(&pdev->dev), i2c); 795 dev_name(&pdev->dev), i2c);
793 if (ret) { 796 if (ret)
794 ret = -ENODEV;
795 goto err; 797 goto err;
796 }
797 798
798 ret = i2c_add_adapter(&i2c->adap); 799 ret = i2c_add_adapter(&i2c->adap);
799 if (ret < 0) { 800 if (ret < 0) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..fc9bf7f30e35 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
1247 u32 reg; 1247 u32 reg;
1248 1248
1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1250 /* enable test mode */
1250 reg |= OMAP_I2C_SYSTEST_ST_EN; 1251 reg |= OMAP_I2C_SYSTEST_ST_EN;
1252 /* select SDA/SCL IO mode */
1253 reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
1254 /* set SCL to high-impedance state (reset value is 0) */
1255 reg |= OMAP_I2C_SYSTEST_SCL_O;
1256 /* set SDA to high-impedance state (reset value is 0) */
1257 reg |= OMAP_I2C_SYSTEST_SDA_O;
1251 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1258 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1252} 1259}
1253 1260
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
1257 u32 reg; 1264 u32 reg;
1258 1265
1259 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1266 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1267 /* restore reset values */
1260 reg &= ~OMAP_I2C_SYSTEST_ST_EN; 1268 reg &= ~OMAP_I2C_SYSTEST_ST_EN;
1269 reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
1270 reg &= ~OMAP_I2C_SYSTEST_SCL_O;
1271 reg &= ~OMAP_I2C_SYSTEST_SDA_O;
1261 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1272 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1262} 1273}
1263 1274
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index dcca7076231e..1c9cb65ac4cf 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -419,6 +419,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
419 rc = i2c_add_adapter(adapter); 419 rc = i2c_add_adapter(adapter);
420 if (rc) { 420 if (rc) {
421 dev_err(&pdev->dev, "Adapter registeration failed\n"); 421 dev_err(&pdev->dev, "Adapter registeration failed\n");
422 mbox_free_channel(ctx->mbox_chan);
422 return rc; 423 return rc;
423 } 424 }
424 425
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 069a41f116dd..c83e4d13cfc5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
567 if (bri->prepare_recovery) 567 if (bri->prepare_recovery)
568 bri->prepare_recovery(adap); 568 bri->prepare_recovery(adap);
569 569
570 bri->set_scl(adap, val);
571 ndelay(RECOVERY_NDELAY);
572
570 /* 573 /*
571 * By this time SCL is high, as we need to give 9 falling-rising edges 574 * By this time SCL is high, as we need to give 9 falling-rising edges
572 */ 575 */
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
597 600
598int i2c_generic_scl_recovery(struct i2c_adapter *adap) 601int i2c_generic_scl_recovery(struct i2c_adapter *adap)
599{ 602{
600 adap->bus_recovery_info->set_scl(adap, 1);
601 return i2c_generic_recovery(adap); 603 return i2c_generic_recovery(adap);
602} 604}
603EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); 605EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -1012,6 +1014,8 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
1012 */ 1014 */
1013void i2c_unregister_device(struct i2c_client *client) 1015void i2c_unregister_device(struct i2c_client *client)
1014{ 1016{
1017 if (client->dev.of_node)
1018 of_node_clear_flag(client->dev.of_node, OF_POPULATED);
1015 device_unregister(&client->dev); 1019 device_unregister(&client->dev);
1016} 1020}
1017EXPORT_SYMBOL_GPL(i2c_unregister_device); 1021EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1320,8 +1324,11 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
1320 1324
1321 dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); 1325 dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
1322 1326
1323 for_each_available_child_of_node(adap->dev.of_node, node) 1327 for_each_available_child_of_node(adap->dev.of_node, node) {
1328 if (of_node_test_and_set_flag(node, OF_POPULATED))
1329 continue;
1324 of_i2c_register_device(adap, node); 1330 of_i2c_register_device(adap, node);
1331 }
1325} 1332}
1326 1333
1327static int of_dev_node_match(struct device *dev, void *data) 1334static int of_dev_node_match(struct device *dev, void *data)
@@ -1333,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
1333struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 1340struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
1334{ 1341{
1335 struct device *dev; 1342 struct device *dev;
1343 struct i2c_client *client;
1336 1344
1337 dev = bus_find_device(&i2c_bus_type, NULL, node, 1345 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1338 of_dev_node_match);
1339 if (!dev) 1346 if (!dev)
1340 return NULL; 1347 return NULL;
1341 1348
1342 return i2c_verify_client(dev); 1349 client = i2c_verify_client(dev);
1350 if (!client)
1351 put_device(dev);
1352
1353 return client;
1343} 1354}
1344EXPORT_SYMBOL(of_find_i2c_device_by_node); 1355EXPORT_SYMBOL(of_find_i2c_device_by_node);
1345 1356
@@ -1347,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
1347struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) 1358struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
1348{ 1359{
1349 struct device *dev; 1360 struct device *dev;
1361 struct i2c_adapter *adapter;
1350 1362
1351 dev = bus_find_device(&i2c_bus_type, NULL, node, 1363 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1352 of_dev_node_match);
1353 if (!dev) 1364 if (!dev)
1354 return NULL; 1365 return NULL;
1355 1366
1356 return i2c_verify_adapter(dev); 1367 adapter = i2c_verify_adapter(dev);
1368 if (!adapter)
1369 put_device(dev);
1370
1371 return adapter;
1357} 1372}
1358EXPORT_SYMBOL(of_find_i2c_adapter_by_node); 1373EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
1359#else 1374#else
@@ -1853,6 +1868,11 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
1853 if (adap == NULL) 1868 if (adap == NULL)
1854 return NOTIFY_OK; /* not for us */ 1869 return NOTIFY_OK; /* not for us */
1855 1870
1871 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
1872 put_device(&adap->dev);
1873 return NOTIFY_OK;
1874 }
1875
1856 client = of_i2c_register_device(adap, rd->dn); 1876 client = of_i2c_register_device(adap, rd->dn);
1857 put_device(&adap->dev); 1877 put_device(&adap->dev);
1858 1878
@@ -1863,6 +1883,10 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
1863 } 1883 }
1864 break; 1884 break;
1865 case OF_RECONFIG_CHANGE_REMOVE: 1885 case OF_RECONFIG_CHANGE_REMOVE:
1886 /* already depopulated? */
1887 if (!of_node_check_flag(rd->dn, OF_POPULATED))
1888 return NOTIFY_OK;
1889
1866 /* find our device by node */ 1890 /* find our device by node */
1867 client = of_find_i2c_device_by_node(rd->dn); 1891 client = of_find_i2c_device_by_node(rd->dn);
1868 if (client == NULL) 1892 if (client == NULL)
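Among the i2c-core changes above, the of_find_i2c_device_by_node() and of_find_i2c_adapter_by_node() hunks fix a reference leak: bus_find_device() returns the device with a held reference, so when the type check fails that reference must be dropped before returning NULL. A toy sketch of that discipline, using stand-in types rather than the real driver-core API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for struct device and the lookup helpers; only the
 * reference-count discipline is being illustrated. */
struct device { int refcount; bool is_i2c_client; };

static struct device *bus_find_device_stub(struct device *dev)
{
        dev->refcount++;        /* the lookup returns a held reference */
        return dev;
}

static struct device *verify_client_stub(struct device *dev)
{
        return dev->is_i2c_client ? dev : NULL;
}

static struct device *find_client(struct device *candidate)
{
        struct device *dev = bus_find_device_stub(candidate);
        struct device *client = verify_client_stub(dev);

        if (!client)
                dev->refcount--;        /* mirrors the added put_device() */
        return client;
}

int main(void)
{
        struct device not_a_client = { .refcount = 0, .is_i2c_client = false };

        find_client(&not_a_client);
        printf("refcount after a lookup miss: %d\n", not_a_client.refcount);
        return 0;
}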
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..1da449614779 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
80 struct eeprom_data *eeprom; 80 struct eeprom_data *eeprom;
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (off + count > attr->size)
84 return -EFBIG;
85
86 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 83 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
87 84
88 spin_lock_irqsave(&eeprom->buffer_lock, flags); 85 spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
98 struct eeprom_data *eeprom; 95 struct eeprom_data *eeprom;
99 unsigned long flags; 96 unsigned long flags;
100 97
101 if (off + count > attr->size)
102 return -EFBIG;
103
104 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 98 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
105 99
106 spin_lock_irqsave(&eeprom->buffer_lock, flags); 100 spin_lock_irqsave(&eeprom->buffer_lock, flags);
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 4e70f51c2370..cc5a35750b50 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -1464,7 +1464,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
1464{ 1464{
1465 int i; 1465 int i;
1466 1466
1467 for (i = from; i >= 0; i++) { 1467 for (i = from; i >= 0; i--) {
1468 if (data->triggers[i].indio_trig) { 1468 if (data->triggers[i].indio_trig) {
1469 iio_trigger_unregister(data->triggers[i].indio_trig); 1469 iio_trigger_unregister(data->triggers[i].indio_trig);
1470 data->triggers[i].indio_trig = NULL; 1470 data->triggers[i].indio_trig = NULL;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..13ea1ea23328 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE) 557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
558 iio_push_event(indio_dev, 558 iio_push_event(indio_dev,
559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, 559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
560 IIO_EV_TYPE_THRESH, 560 IIO_EV_TYPE_MAG,
561 IIO_EV_DIR_RISING), 561 IIO_EV_DIR_RISING),
562 ts); 562 ts);
563 563
564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE) 564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
565 iio_push_event(indio_dev, 565 iio_push_event(indio_dev,
566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, 566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
567 IIO_EV_TYPE_THRESH, 567 IIO_EV_TYPE_MAG,
568 IIO_EV_DIR_RISING), 568 IIO_EV_DIR_RISING),
569 ts); 569 ts);
570 570
571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) 571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
572 iio_push_event(indio_dev, 572 iio_push_event(indio_dev,
573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, 573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
574 IIO_EV_TYPE_THRESH, 574 IIO_EV_TYPE_MAG,
575 IIO_EV_DIR_RISING), 575 IIO_EV_DIR_RISING),
576 ts); 576 ts);
577} 577}
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
644 644
645static const struct iio_event_spec mma8452_transient_event[] = { 645static const struct iio_event_spec mma8452_transient_event[] = {
646 { 646 {
647 .type = IIO_EV_TYPE_THRESH, 647 .type = IIO_EV_TYPE_MAG,
648 .dir = IIO_EV_DIR_RISING, 648 .dir = IIO_EV_DIR_RISING,
649 .mask_separate = BIT(IIO_EV_INFO_ENABLE), 649 .mask_separate = BIT(IIO_EV_INFO_ENABLE),
650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7c5565891cb8..eb0cd897714a 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -153,8 +153,7 @@ config DA9150_GPADC
153 153
154config CC10001_ADC 154config CC10001_ADC
155 tristate "Cosmic Circuits 10001 ADC driver" 155 tristate "Cosmic Circuits 10001 ADC driver"
156 depends on HAVE_CLK || REGULATOR 156 depends on HAS_IOMEM && HAVE_CLK && REGULATOR
157 depends on HAS_IOMEM
158 select IIO_BUFFER 157 select IIO_BUFFER
159 select IIO_TRIGGERED_BUFFER 158 select IIO_TRIGGERED_BUFFER
160 help 159 help
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 8a0eb4a04fb5..7b40925dd4ff 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -182,7 +182,7 @@ struct at91_adc_caps {
182 u8 ts_pen_detect_sensitivity; 182 u8 ts_pen_detect_sensitivity;
183 183
184 /* startup time calculate function */ 184 /* startup time calculate function */
185 u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz); 185 u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
186 186
187 u8 num_channels; 187 u8 num_channels;
188 struct at91_adc_reg_desc registers; 188 struct at91_adc_reg_desc registers;
@@ -201,7 +201,7 @@ struct at91_adc_state {
201 u8 num_channels; 201 u8 num_channels;
202 void __iomem *reg_base; 202 void __iomem *reg_base;
203 struct at91_adc_reg_desc *registers; 203 struct at91_adc_reg_desc *registers;
204 u8 startup_time; 204 u32 startup_time;
205 u8 sample_hold_time; 205 u8 sample_hold_time;
206 bool sleep_mode; 206 bool sleep_mode;
207 struct iio_trigger **trig; 207 struct iio_trigger **trig;
@@ -779,7 +779,7 @@ ret:
779 return ret; 779 return ret;
780} 780}
781 781
782static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz) 782static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
783{ 783{
784 /* 784 /*
785 * Number of ticks needed to cover the startup time of the ADC 785 * Number of ticks needed to cover the startup time of the ADC
@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
790 return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8; 790 return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
791} 791}
792 792
793static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz) 793static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
794{ 794{
795 /* 795 /*
796 * For sama5d3x and at91sam9x5, the formula changes to: 796 * For sama5d3x and at91sam9x5, the formula changes to:
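
Note: the at91_adc hunks above widen startup_time from u8 to u32 so startup times longer than 255 are not truncated before the tick calculation. A hedged, standalone rework of the 9260-style formula from the hunk (the ROUND_UP macro and the microsecond/kHz units are assumptions based on the surrounding comment):

    #include <stdint.h>
    #include <stdio.h>

    #define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    /* ticks = startup_time(us) * clk(kHz) / 1000, with 8-tick granularity */
    static uint32_t calc_startup_ticks_9260_demo(uint32_t startup_time,
                                                 uint32_t adc_clk_khz)
    {
        return ROUND_UP((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
    }

    int main(void)
    {
        /* a u8 startup_time would truncate 512 us to 0; u32 keeps it */
        printf("%u\n", (unsigned)calc_startup_ticks_9260_demo(512, 1000));
        return 0;
    }
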
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..d819823f7257 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
299 indio_dev->channels = chip_info->channels; 299 indio_dev->channels = chip_info->channels;
300 indio_dev->num_channels = chip_info->num_channels; 300 indio_dev->num_channels = chip_info->num_channels;
301 301
302 adc->chip_info = chip_info;
303
302 adc->transfer[0].tx_buf = &adc->tx_buf; 304 adc->transfer[0].tx_buf = &adc->tx_buf;
303 adc->transfer[0].len = sizeof(adc->tx_buf); 305 adc->transfer[0].len = sizeof(adc->tx_buf);
304 adc->transfer[1].rx_buf = adc->rx_buf; 306 adc->transfer[1].rx_buf = adc->rx_buf;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 8d4e019ea4ca..9c311c1e1ac7 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
349}; 349};
350 350
351module_platform_driver(rockchip_saradc_driver); 351module_platform_driver(rockchip_saradc_driver);
352
353MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
354MODULE_DESCRIPTION("Rockchip SARADC driver");
355MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 06f4792240f0..ebe415f10640 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
833 irq = platform_get_irq(pdev, 0); 833 irq = platform_get_irq(pdev, 0);
834 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, 834 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
835 twl4030_madc_threaded_irq_handler, 835 twl4030_madc_threaded_irq_handler,
836 IRQF_TRIGGER_RISING, "twl4030_madc", madc); 836 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
837 "twl4030_madc", madc);
837 if (ret) { 838 if (ret) {
838 dev_err(&pdev->dev, "could not request irq\n"); 839 dev_err(&pdev->dev, "could not request irq\n");
839 goto err_i2c; 840 goto err_i2c;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..819632bf1fda 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
635 struct vf610_adc *info = iio_priv(indio_dev); 635 struct vf610_adc *info = iio_priv(indio_dev);
636 636
637 if ((readval == NULL) || 637 if ((readval == NULL) ||
638 (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) 638 ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
639 return -EINVAL; 639 return -EINVAL;
640 640
641 *readval = readl(info->regs + reg); 641 *readval = readl(info->regs + reg);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 610fc98f88ef..595511022795 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
36 s32 poll_value = 0; 36 s32 poll_value = 0;
37 37
38 if (state) { 38 if (state) {
39 if (!atomic_read(&st->user_requested_state))
40 return 0;
39 if (sensor_hub_device_open(st->hsdev)) 41 if (sensor_hub_device_open(st->hsdev))
40 return -EIO; 42 return -EIO;
41 43
@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
52 54
53 poll_value = hid_sensor_read_poll_value(st); 55 poll_value = hid_sensor_read_poll_value(st);
54 } else { 56 } else {
55 if (!atomic_dec_and_test(&st->data_ready)) 57 int val;
58
59 val = atomic_dec_if_positive(&st->data_ready);
60 if (val < 0)
56 return 0; 61 return 0;
62
57 sensor_hub_device_close(st->hsdev); 63 sensor_hub_device_close(st->hsdev);
58 state_val = hid_sensor_get_usage_index(st->hsdev, 64 state_val = hid_sensor_get_usage_index(st->hsdev,
59 st->power_state.report_id, 65 st->power_state.report_id,
@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
92 98
93int hid_sensor_power_state(struct hid_sensor_common *st, bool state) 99int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
94{ 100{
101
95#ifdef CONFIG_PM 102#ifdef CONFIG_PM
96 int ret; 103 int ret;
97 104
105 atomic_set(&st->user_requested_state, state);
98 if (state) 106 if (state)
99 ret = pm_runtime_get_sync(&st->pdev->dev); 107 ret = pm_runtime_get_sync(&st->pdev->dev);
100 else { 108 else {
@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
109 117
110 return 0; 118 return 0;
111#else 119#else
120 atomic_set(&st->user_requested_state, state);
112 return _hid_sensor_power_state(st, state); 121 return _hid_sensor_power_state(st, state);
113#endif 122#endif
114} 123}
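
Note: the hid-sensor-trigger hunk above replaces atomic_dec_and_test() with atomic_dec_if_positive() so a power-off request cannot push the open count below zero when nothing was powered on. A minimal userspace sketch of that dec-if-positive semantic, built on C11 atomics rather than the kernel helper:

    #include <stdatomic.h>
    #include <stdio.h>

    /* decrement only if the counter is positive; return the would-be new value */
    static int dec_if_positive(atomic_int *v)
    {
        int old = atomic_load(v);

        while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
            ;       /* CAS failure reloads 'old' and retries */
        return old - 1;
    }

    int main(void)
    {
        atomic_int users = 0;

        if (dec_if_positive(&users) < 0)
            printf("already powered off, nothing to close\n");
        return 0;
    }
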
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 61bb9d4239ea..e98428df0d44 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -22,7 +22,7 @@
22#include "ad5624r.h" 22#include "ad5624r.h"
23 23
24static int ad5624r_spi_write(struct spi_device *spi, 24static int ad5624r_spi_write(struct spi_device *spi,
25 u8 cmd, u8 addr, u16 val, u8 len) 25 u8 cmd, u8 addr, u16 val, u8 shift)
26{ 26{
27 u32 data; 27 u32 data;
28 u8 msg[3]; 28 u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
35 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits, 35 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
36 * for the AD5664R, AD5644R, and AD5624R, respectively. 36 * for the AD5664R, AD5644R, and AD5624R, respectively.
37 */ 37 */
38 data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len)); 38 data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
39 msg[0] = data >> 16; 39 msg[0] = data >> 16;
40 msg[1] = data >> 8; 40 msg[1] = data >> 8;
41 msg[2] = data; 41 msg[2] = data;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 17d4bb15be4d..65ce86837177 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
431 return -EINVAL; 431 return -EINVAL;
432} 432}
433 433
434static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
435 struct iio_chan_spec const *chan, long mask)
436{
437 switch (mask) {
438 case IIO_CHAN_INFO_SCALE:
439 switch (chan->type) {
440 case IIO_ANGL_VEL:
441 return IIO_VAL_INT_PLUS_NANO;
442 default:
443 return IIO_VAL_INT_PLUS_MICRO;
444 }
445 default:
446 return IIO_VAL_INT_PLUS_MICRO;
447 }
448
449 return -EINVAL;
450}
434static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val) 451static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
435{ 452{
436 int result, i; 453 int result, i;
@@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
696 .driver_module = THIS_MODULE, 713 .driver_module = THIS_MODULE,
697 .read_raw = &inv_mpu6050_read_raw, 714 .read_raw = &inv_mpu6050_read_raw,
698 .write_raw = &inv_mpu6050_write_raw, 715 .write_raw = &inv_mpu6050_write_raw,
716 .write_raw_get_fmt = &inv_write_raw_get_fmt,
699 .attrs = &inv_attribute_group, 717 .attrs = &inv_attribute_group,
700 .validate_trigger = inv_mpu6050_validate_trigger, 718 .validate_trigger = inv_mpu6050_validate_trigger,
701}; 719};
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e6198b7c9cbf..a5c59251ec0e 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -188,6 +188,7 @@ config SENSORS_LM3533
188config LTR501 188config LTR501
189 tristate "LTR-501ALS-01 light sensor" 189 tristate "LTR-501ALS-01 light sensor"
190 depends on I2C 190 depends on I2C
191 select REGMAP_I2C
191 select IIO_BUFFER 192 select IIO_BUFFER
192 select IIO_TRIGGERED_BUFFER 193 select IIO_TRIGGERED_BUFFER
193 help 194 help
@@ -201,6 +202,7 @@ config LTR501
201config STK3310 202config STK3310
202 tristate "STK3310 ALS and proximity sensor" 203 tristate "STK3310 ALS and proximity sensor"
203 depends on I2C 204 depends on I2C
205 select REGMAP_I2C
204 help 206 help
205 Say yes here to get support for the Sensortek STK3310 ambient light 207 Say yes here to get support for the Sensortek STK3310 ambient light
206 and proximity sensor. The STK3311 model is also supported by this 208 and proximity sensor. The STK3311 model is also supported by this
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index 869033e48a1f..a1d4905cc9d2 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
123 for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) { 123 for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
124 if (val == cm3323_int_time[i].val && 124 if (val == cm3323_int_time[i].val &&
125 val2 == cm3323_int_time[i].val2) { 125 val2 == cm3323_int_time[i].val2) {
126 reg_conf = data->reg_conf; 126 reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
127 reg_conf |= i << CM3323_CONF_IT_SHIFT; 127 reg_conf |= i << CM3323_CONF_IT_SHIFT;
128 128
129 ret = i2c_smbus_write_word_data(data->client, 129 ret = i2c_smbus_write_word_data(data->client,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 1ef7d3773ab9..b5a0e66b5f28 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data)
1302 if (ret < 0) 1302 if (ret < 0)
1303 return ret; 1303 return ret;
1304 1304
1305 data->als_contr = ret | data->chip_info->als_mode_active; 1305 data->als_contr = status | data->chip_info->als_mode_active;
1306 1306
1307 ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status); 1307 ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status);
1308 if (ret < 0) 1308 if (ret < 0)
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index fee4297d7c8f..11a027adc204 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -43,7 +43,6 @@
43#define STK3311_CHIP_ID_VAL 0x1D 43#define STK3311_CHIP_ID_VAL 0x1D
44#define STK3310_PSINT_EN 0x01 44#define STK3310_PSINT_EN 0x01
45#define STK3310_PS_MAX_VAL 0xFFFF 45#define STK3310_PS_MAX_VAL 0xFFFF
46#define STK3310_THRESH_MAX 0xFFFF
47 46
48#define STK3310_DRIVER_NAME "stk3310" 47#define STK3310_DRIVER_NAME "stk3310"
49#define STK3310_REGMAP_NAME "stk3310_regmap" 48#define STK3310_REGMAP_NAME "stk3310_regmap"
@@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint =
84 REG_FIELD(STK3310_REG_FLAG, 4, 4); 83 REG_FIELD(STK3310_REG_FLAG, 4, 4);
85static const struct reg_field stk3310_reg_field_flag_nf = 84static const struct reg_field stk3310_reg_field_flag_nf =
86 REG_FIELD(STK3310_REG_FLAG, 0, 0); 85 REG_FIELD(STK3310_REG_FLAG, 0, 0);
87/* 86
88 * Maximum PS values with regard to scale. Used to export the 'inverse' 87/* Estimate maximum proximity values with regard to measurement scale. */
89 * PS value (high values for far objects, low values for near objects).
90 */
91static const int stk3310_ps_max[4] = { 88static const int stk3310_ps_max[4] = {
92 STK3310_PS_MAX_VAL / 64, 89 STK3310_PS_MAX_VAL / 640,
93 STK3310_PS_MAX_VAL / 16, 90 STK3310_PS_MAX_VAL / 160,
94 STK3310_PS_MAX_VAL / 4, 91 STK3310_PS_MAX_VAL / 40,
95 STK3310_PS_MAX_VAL, 92 STK3310_PS_MAX_VAL / 10
96}; 93};
97 94
98static const int stk3310_scale_table[][2] = { 95static const int stk3310_scale_table[][2] = {
@@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = {
128 /* Proximity event */ 125 /* Proximity event */
129 { 126 {
130 .type = IIO_EV_TYPE_THRESH, 127 .type = IIO_EV_TYPE_THRESH,
131 .dir = IIO_EV_DIR_FALLING, 128 .dir = IIO_EV_DIR_RISING,
132 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 129 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
133 BIT(IIO_EV_INFO_ENABLE), 130 BIT(IIO_EV_INFO_ENABLE),
134 }, 131 },
135 /* Out-of-proximity event */ 132 /* Out-of-proximity event */
136 { 133 {
137 .type = IIO_EV_TYPE_THRESH, 134 .type = IIO_EV_TYPE_THRESH,
138 .dir = IIO_EV_DIR_RISING, 135 .dir = IIO_EV_DIR_FALLING,
139 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 136 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
140 BIT(IIO_EV_INFO_ENABLE), 137 BIT(IIO_EV_INFO_ENABLE),
141 }, 138 },
@@ -203,25 +200,18 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
203 int *val, int *val2) 200 int *val, int *val2)
204{ 201{
205 u8 reg; 202 u8 reg;
206 u16 buf; 203 __be16 buf;
207 int ret; 204 int ret;
208 unsigned int index;
209 struct stk3310_data *data = iio_priv(indio_dev); 205 struct stk3310_data *data = iio_priv(indio_dev);
210 206
211 if (info != IIO_EV_INFO_VALUE) 207 if (info != IIO_EV_INFO_VALUE)
212 return -EINVAL; 208 return -EINVAL;
213 209
214 /* 210 /* Only proximity interrupts are implemented at the moment. */
215 * Only proximity interrupts are implemented at the moment.
216 * Since we're inverting proximity values, the sensor's 'high'
217 * threshold will become our 'low' threshold, associated with
218 * 'near' events. Similarly, the sensor's 'low' threshold will
219 * be our 'high' threshold, associated with 'far' events.
220 */
221 if (dir == IIO_EV_DIR_RISING) 211 if (dir == IIO_EV_DIR_RISING)
222 reg = STK3310_REG_THDL_PS;
223 else if (dir == IIO_EV_DIR_FALLING)
224 reg = STK3310_REG_THDH_PS; 212 reg = STK3310_REG_THDH_PS;
213 else if (dir == IIO_EV_DIR_FALLING)
214 reg = STK3310_REG_THDL_PS;
225 else 215 else
226 return -EINVAL; 216 return -EINVAL;
227 217
@@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
232 dev_err(&data->client->dev, "register read failed\n"); 222 dev_err(&data->client->dev, "register read failed\n");
233 return ret; 223 return ret;
234 } 224 }
235 regmap_field_read(data->reg_ps_gain, &index); 225 *val = be16_to_cpu(buf);
236 *val = swab16(stk3310_ps_max[index] - buf);
237 226
238 return IIO_VAL_INT; 227 return IIO_VAL_INT;
239} 228}
@@ -246,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
246 int val, int val2) 235 int val, int val2)
247{ 236{
248 u8 reg; 237 u8 reg;
249 u16 buf; 238 __be16 buf;
250 int ret; 239 int ret;
251 unsigned int index; 240 unsigned int index;
252 struct stk3310_data *data = iio_priv(indio_dev); 241 struct stk3310_data *data = iio_priv(indio_dev);
@@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
257 return -EINVAL; 246 return -EINVAL;
258 247
259 if (dir == IIO_EV_DIR_RISING) 248 if (dir == IIO_EV_DIR_RISING)
260 reg = STK3310_REG_THDL_PS;
261 else if (dir == IIO_EV_DIR_FALLING)
262 reg = STK3310_REG_THDH_PS; 249 reg = STK3310_REG_THDH_PS;
250 else if (dir == IIO_EV_DIR_FALLING)
251 reg = STK3310_REG_THDL_PS;
263 else 252 else
264 return -EINVAL; 253 return -EINVAL;
265 254
266 buf = swab16(stk3310_ps_max[index] - val); 255 buf = cpu_to_be16(val);
267 ret = regmap_bulk_write(data->regmap, reg, &buf, 2); 256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
268 if (ret < 0) 257 if (ret < 0)
269 dev_err(&client->dev, "failed to set PS threshold!\n"); 258 dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -312,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
312 int *val, int *val2, long mask) 301 int *val, int *val2, long mask)
313{ 302{
314 u8 reg; 303 u8 reg;
315 u16 buf; 304 __be16 buf;
316 int ret; 305 int ret;
317 unsigned int index; 306 unsigned int index;
318 struct stk3310_data *data = iio_priv(indio_dev); 307 struct stk3310_data *data = iio_priv(indio_dev);
@@ -333,15 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
333 mutex_unlock(&data->lock); 322 mutex_unlock(&data->lock);
334 return ret; 323 return ret;
335 } 324 }
336 *val = swab16(buf); 325 *val = be16_to_cpu(buf);
337 if (chan->type == IIO_PROXIMITY) {
338 /*
339 * Invert the proximity data so we return low values
340 * for close objects and high values for far ones.
341 */
342 regmap_field_read(data->reg_ps_gain, &index);
343 *val = stk3310_ps_max[index] - *val;
344 }
345 mutex_unlock(&data->lock); 326 mutex_unlock(&data->lock);
346 return IIO_VAL_INT; 327 return IIO_VAL_INT;
347 case IIO_CHAN_INFO_INT_TIME: 328 case IIO_CHAN_INFO_INT_TIME:
@@ -581,8 +562,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
581 } 562 }
582 event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, 563 event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
583 IIO_EV_TYPE_THRESH, 564 IIO_EV_TYPE_THRESH,
584 (dir ? IIO_EV_DIR_RISING : 565 (dir ? IIO_EV_DIR_FALLING :
585 IIO_EV_DIR_FALLING)); 566 IIO_EV_DIR_RISING));
586 iio_push_event(indio_dev, event, data->timestamp); 567 iio_push_event(indio_dev, event, data->timestamp);
587 568
588 /* Reset the interrupt flag */ 569 /* Reset the interrupt flag */
@@ -627,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
627 if (ret < 0) 608 if (ret < 0)
628 return ret; 609 return ret;
629 610
630 ret = iio_device_register(indio_dev); 611 if (client->irq < 0)
631 if (ret < 0) {
632 dev_err(&client->dev, "device_register failed\n");
633 stk3310_set_state(data, STK3310_STATE_STANDBY);
634 }
635
636 if (client->irq <= 0)
637 client->irq = stk3310_gpio_probe(client); 612 client->irq = stk3310_gpio_probe(client);
638 613
639 if (client->irq >= 0) { 614 if (client->irq >= 0) {
@@ -648,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
648 client->irq); 623 client->irq);
649 } 624 }
650 625
626 ret = iio_device_register(indio_dev);
627 if (ret < 0) {
628 dev_err(&client->dev, "device_register failed\n");
629 stk3310_set_state(data, STK3310_STATE_STANDBY);
630 }
631
651 return ret; 632 return ret;
652} 633}
653 634
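
Note: several stk3310 hunks above switch the raw buffers from u16 plus swab16() to __be16 plus be16_to_cpu(), declaring the register value big-endian and converting only when the host byte order actually differs. A small endianness sketch of the same idea (plain C, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* interpret two I2C bytes as a big-endian 16-bit register value */
    static uint16_t be16_to_host(const uint8_t raw[2])
    {
        return (uint16_t)((raw[0] << 8) | raw[1]);      /* MSB first */
    }

    int main(void)
    {
        uint8_t regs[2] = { 0x12, 0x34 };       /* as read from the bus */

        printf("0x%04x\n", (unsigned)be16_to_host(regs));   /* 0x1234 on any host */
        return 0;
    }
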
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 71c2bde275aa..f8b1df018abe 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
185 if (val != 0) 185 if (val != 0)
186 return -EINVAL; 186 return -EINVAL;
187 for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) { 187 for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
188 if (val == tcs3414_times[i] * 1000) { 188 if (val2 == tcs3414_times[i] * 1000) {
189 data->timing &= ~TCS3414_INTEG_MASK; 189 data->timing &= ~TCS3414_INTEG_MASK;
190 data->timing |= i; 190 data->timing |= i;
191 return i2c_smbus_write_byte_data( 191 return i2c_smbus_write_byte_data(
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
90config BMC150_MAGN 90config BMC150_MAGN
91 tristate "Bosch BMC150 Magnetometer Driver" 91 tristate "Bosch BMC150 Magnetometer Driver"
92 depends on I2C 92 depends on I2C
93 select REGMAP_I2C
93 select IIO_BUFFER 94 select IIO_BUFFER
94 select IIO_TRIGGERED_BUFFER 95 select IIO_TRIGGERED_BUFFER
95 help 96 help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d4c178869991..1347a1f2e46f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
706 goto err_poweroff; 706 goto err_poweroff;
707 } 707 }
708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { 708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); 709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
710 ret = -ENODEV; 710 ret = -ENODEV;
711 goto err_poweroff; 711 goto err_poweroff;
712 } 712 }
713 dev_dbg(&data->client->dev, "Chip id %x\n", ret); 713 dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
714 714
715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; 715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
716 ret = bmc150_magn_set_odr(data, preset.odr); 716 ret = bmc150_magn_set_odr(data, preset.odr);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 7a2ea71c659a..706ebfd6297f 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -84,10 +84,10 @@
84#define MMC35240_OTP_START_ADDR 0x1B 84#define MMC35240_OTP_START_ADDR 0x1B
85 85
86enum mmc35240_resolution { 86enum mmc35240_resolution {
87 MMC35240_16_BITS_SLOW = 0, /* 100 Hz */ 87 MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */
88 MMC35240_16_BITS_FAST, /* 200 Hz */ 88 MMC35240_16_BITS_FAST, /* 4.08 ms */
89 MMC35240_14_BITS, /* 333 Hz */ 89 MMC35240_14_BITS, /* 2.16 ms */
90 MMC35240_12_BITS, /* 666 Hz */ 90 MMC35240_12_BITS, /* 1.20 ms */
91}; 91};
92 92
93enum mmc35240_axis { 93enum mmc35240_axis {
@@ -100,22 +100,22 @@ static const struct {
100 int sens[3]; /* sensitivity per X, Y, Z axis */ 100 int sens[3]; /* sensitivity per X, Y, Z axis */
101 int nfo; /* null field output */ 101 int nfo; /* null field output */
102} mmc35240_props_table[] = { 102} mmc35240_props_table[] = {
103 /* 16 bits, 100Hz ODR */ 103 /* 16 bits, 125Hz ODR */
104 { 104 {
105 {1024, 1024, 1024}, 105 {1024, 1024, 1024},
106 32768, 106 32768,
107 }, 107 },
108 /* 16 bits, 200Hz ODR */ 108 /* 16 bits, 250Hz ODR */
109 { 109 {
110 {1024, 1024, 770}, 110 {1024, 1024, 770},
111 32768, 111 32768,
112 }, 112 },
113 /* 14 bits, 333Hz ODR */ 113 /* 14 bits, 450Hz ODR */
114 { 114 {
115 {256, 256, 193}, 115 {256, 256, 193},
116 8192, 116 8192,
117 }, 117 },
118 /* 12 bits, 666Hz ODR */ 118 /* 12 bits, 800Hz ODR */
119 { 119 {
120 {64, 64, 48}, 120 {64, 64, 48},
121 2048, 121 2048,
@@ -133,9 +133,15 @@ struct mmc35240_data {
133 int axis_scale[3]; 133 int axis_scale[3];
134}; 134};
135 135
136static const int mmc35240_samp_freq[] = {100, 200, 333, 666}; 136static const struct {
137 int val;
138 int val2;
139} mmc35240_samp_freq[] = { {1, 500000},
140 {13, 0},
141 {25, 0},
142 {50, 0} };
137 143
138static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666"); 144static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50");
139 145
140#define MMC35240_CHANNEL(_axis) { \ 146#define MMC35240_CHANNEL(_axis) { \
141 .type = IIO_MAGN, \ 147 .type = IIO_MAGN, \
@@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data,
168 int i; 174 int i;
169 175
170 for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++) 176 for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++)
171 if (mmc35240_samp_freq[i] == val) 177 if (mmc35240_samp_freq[i].val == val &&
178 mmc35240_samp_freq[i].val2 == val2)
172 return i; 179 return i;
173 return -EINVAL; 180 return -EINVAL;
174} 181}
@@ -195,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
195 coil_bit = MMC35240_CTRL0_RESET_BIT; 202 coil_bit = MMC35240_CTRL0_RESET_BIT;
196 203
197 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, 204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
198 MMC35240_CTRL0_REFILL_BIT, 205 coil_bit, coil_bit);
199 coil_bit); 206
200} 207}
201 208
202static int mmc35240_init(struct mmc35240_data *data) 209static int mmc35240_init(struct mmc35240_data *data)
@@ -215,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
215 222
216 /* 223 /*
217 * make sure we restore sensor characteristics, by doing 224 * make sure we restore sensor characteristics, by doing
218 * a RESET/SET sequence 225 * a SET/RESET sequence, the axis polarity being naturally
226 * aligned after RESET
219 */ 227 */
220 ret = mmc35240_hw_set(data, false); 228 ret = mmc35240_hw_set(data, true);
221 if (ret < 0) 229 if (ret < 0)
222 return ret; 230 return ret;
223 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); 231 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
224 232
225 ret = mmc35240_hw_set(data, true); 233 ret = mmc35240_hw_set(data, false);
226 if (ret < 0) 234 if (ret < 0)
227 return ret; 235 return ret;
228 236
@@ -378,9 +386,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev,
378 if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq)) 386 if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq))
379 return -EINVAL; 387 return -EINVAL;
380 388
381 *val = mmc35240_samp_freq[i]; 389 *val = mmc35240_samp_freq[i].val;
382 *val2 = 0; 390 *val2 = mmc35240_samp_freq[i].val2;
383 return IIO_VAL_INT; 391 return IIO_VAL_INT_PLUS_MICRO;
384 default: 392 default:
385 return -EINVAL; 393 return -EINVAL;
386 } 394 }
@@ -496,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
496 } 504 }
497 505
498 data = iio_priv(indio_dev); 506 data = iio_priv(indio_dev);
507 i2c_set_clientdata(client, indio_dev);
499 data->client = client; 508 data->client = client;
500 data->regmap = regmap; 509 data->regmap = regmap;
501 data->res = MMC35240_16_BITS_SLOW; 510 data->res = MMC35240_16_BITS_SLOW;
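
Note: the mmc35240 hunks above report the sampling frequency as IIO_VAL_INT_PLUS_MICRO, so a non-integer rate such as 1.5 Hz is split into an integer part and a microunit part. A one-line worked example of that val/val2 encoding:

    #include <stdio.h>

    int main(void)
    {
        int val = 1, val2 = 500000;             /* 1.5 Hz as {val, val2} */

        printf("%d.%06d Hz\n", val, val2);      /* prints 1.500000 Hz */
        return 0;
    }
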
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 2042e375f835..3d756bd8c703 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -80,6 +80,7 @@
80#define SX9500_COMPSTAT_MASK GENMASK(3, 0) 80#define SX9500_COMPSTAT_MASK GENMASK(3, 0)
81 81
82#define SX9500_NUM_CHANNELS 4 82#define SX9500_NUM_CHANNELS 4
83#define SX9500_CHAN_MASK GENMASK(SX9500_NUM_CHANNELS - 1, 0)
83 84
84struct sx9500_data { 85struct sx9500_data {
85 struct mutex mutex; 86 struct mutex mutex;
@@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data,
281 if (ret < 0) 282 if (ret < 0)
282 return ret; 283 return ret;
283 284
284 *val = 32767 - (s16)be16_to_cpu(regval); 285 *val = be16_to_cpu(regval);
285 286
286 return IIO_VAL_INT; 287 return IIO_VAL_INT;
287} 288}
@@ -329,27 +330,29 @@ static int sx9500_read_proximity(struct sx9500_data *data,
329 else 330 else
330 ret = sx9500_wait_for_sample(data); 331 ret = sx9500_wait_for_sample(data);
331 332
332 if (ret < 0)
333 return ret;
334
335 mutex_lock(&data->mutex); 333 mutex_lock(&data->mutex);
336 334
337 ret = sx9500_read_prox_data(data, chan, val);
338 if (ret < 0) 335 if (ret < 0)
339 goto out; 336 goto out_dec_data_rdy;
340 337
341 ret = sx9500_dec_chan_users(data, chan->channel); 338 ret = sx9500_read_prox_data(data, chan, val);
342 if (ret < 0) 339 if (ret < 0)
343 goto out; 340 goto out_dec_data_rdy;
344 341
345 ret = sx9500_dec_data_rdy_users(data); 342 ret = sx9500_dec_data_rdy_users(data);
346 if (ret < 0) 343 if (ret < 0)
344 goto out_dec_chan;
345
346 ret = sx9500_dec_chan_users(data, chan->channel);
347 if (ret < 0)
347 goto out; 348 goto out;
348 349
349 ret = IIO_VAL_INT; 350 ret = IIO_VAL_INT;
350 351
351 goto out; 352 goto out;
352 353
354out_dec_data_rdy:
355 sx9500_dec_data_rdy_users(data);
353out_dec_chan: 356out_dec_chan:
354 sx9500_dec_chan_users(data, chan->channel); 357 sx9500_dec_chan_users(data, chan->channel);
355out: 358out:
@@ -679,7 +682,7 @@ out:
679static int sx9500_buffer_preenable(struct iio_dev *indio_dev) 682static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
680{ 683{
681 struct sx9500_data *data = iio_priv(indio_dev); 684 struct sx9500_data *data = iio_priv(indio_dev);
682 int ret, i; 685 int ret = 0, i;
683 686
684 mutex_lock(&data->mutex); 687 mutex_lock(&data->mutex);
685 688
@@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
703static int sx9500_buffer_predisable(struct iio_dev *indio_dev) 706static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
704{ 707{
705 struct sx9500_data *data = iio_priv(indio_dev); 708 struct sx9500_data *data = iio_priv(indio_dev);
706 int ret, i; 709 int ret = 0, i;
707 710
708 iio_triggered_buffer_predisable(indio_dev); 711 iio_triggered_buffer_predisable(indio_dev);
709 712
@@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
800 unsigned int val; 803 unsigned int val;
801 804
802 ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, 805 ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
803 GENMASK(SX9500_NUM_CHANNELS, 0), 806 SX9500_CHAN_MASK, SX9500_CHAN_MASK);
804 GENMASK(SX9500_NUM_CHANNELS, 0));
805 if (ret < 0) 807 if (ret < 0)
806 return ret; 808 return ret;
807 809
@@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
821 823
822out: 824out:
823 regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0, 825 regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
824 GENMASK(SX9500_NUM_CHANNELS, 0), 0); 826 SX9500_CHAN_MASK, 0);
825 return ret; 827 return ret;
826} 828}
827 829
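
Note: the sx9500 read path above is restructured so each acquired resource gets its own unwind label and later acquisitions are released first. A compact sketch of that stacked-label cleanup pattern (the acquire/release names are invented for illustration):

    #include <stdio.h>

    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static void release_a(void) { puts("release a"); }
    static void release_b(void) { puts("release b"); }

    static int do_read_demo(void)
    {
        int ret;

        ret = acquire_a();
        if (ret < 0)
            goto out;
        ret = acquire_b();
        if (ret < 0)
            goto out_release_a;

        /* ... use both resources, then fall through the unwind ... */
        release_b();
    out_release_a:
        release_a();
    out:
        return ret;
    }

    int main(void)
    {
        return do_read_demo();
    }
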
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index cb2e8ad8bfdc..7a2b639eaa96 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
204 *val = ret; 204 *val = ret;
205 return IIO_VAL_INT; 205 return IIO_VAL_INT;
206 case IIO_CHAN_INFO_OFFSET: 206 case IIO_CHAN_INFO_OFFSET:
207 *val = 13657; 207 *val = -13657;
208 *val2 = 500000; 208 *val2 = 500000;
209 return IIO_VAL_INT_PLUS_MICRO; 209 return IIO_VAL_INT_PLUS_MICRO;
210 case IIO_CHAN_INFO_SCALE: 210 case IIO_CHAN_INFO_SCALE:
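
Note: the mlx90614 hunk above flips the sign of the reported offset; with the IIO convention value = (raw + offset) * scale and a 0.02 K per count scale, an offset of -13657.5 turns the sensor's Kelvin-based counts into degrees Celsius. A worked arithmetic check:

    #include <stdio.h>

    int main(void)
    {
        double raw = 14658;                     /* sensor counts, 0.02 K each */
        double offset = -13657.5, scale = 0.02;

        printf("%.2f C\n", (raw + offset) * scale);     /* 20.01 C */
        return 0;
    }
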
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index fcc49f89b946..8f21f32f9739 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
132 struct tmp006_data *data = iio_priv(indio_dev); 132 struct tmp006_data *data = iio_priv(indio_dev);
133 int i; 133 int i;
134 134
135 if (mask != IIO_CHAN_INFO_SAMP_FREQ)
136 return -EINVAL;
137
135 for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++) 138 for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
136 if ((val == tmp006_freqs[i][0]) && 139 if ((val == tmp006_freqs[i][0]) &&
137 (val2 == tmp006_freqs[i][1])) { 140 (val2 == tmp006_freqs[i][1])) {
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index c7dcfe4ca5f1..0429040304fd 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
88 struct ib_ah *ah; 88 struct ib_ah *ah;
89 struct ib_mad_send_wr_private *mad_send_wr; 89 struct ib_mad_send_wr_private *mad_send_wr;
90 90
91 if (device->node_type == RDMA_NODE_IB_SWITCH) 91 if (rdma_cap_ib_switch(device))
92 port_priv = ib_get_agent_port(device, 0); 92 port_priv = ib_get_agent_port(device, 0);
93 else 93 else
94 port_priv = ib_get_agent_port(device, port_num); 94 port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
122 memcpy(send_buf->mad, mad_hdr, resp_mad_len); 122 memcpy(send_buf->mad, mad_hdr, resp_mad_len);
123 send_buf->ah = ah; 123 send_buf->ah = ah;
124 124
125 if (device->node_type == RDMA_NODE_IB_SWITCH) { 125 if (rdma_cap_ib_switch(device)) {
126 mad_send_wr = container_of(send_buf, 126 mad_send_wr = container_of(send_buf,
127 struct ib_mad_send_wr_private, 127 struct ib_mad_send_wr_private,
128 send_buf); 128 send_buf);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index dbddddd6fb5d..3a972ebf3c0d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -169,6 +169,7 @@ struct cm_device {
169 struct ib_device *ib_device; 169 struct ib_device *ib_device;
170 struct device *device; 170 struct device *device;
171 u8 ack_delay; 171 u8 ack_delay;
172 int going_down;
172 struct cm_port *port[0]; 173 struct cm_port *port[0];
173}; 174};
174 175
@@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
805{ 806{
806 int wait_time; 807 int wait_time;
807 unsigned long flags; 808 unsigned long flags;
809 struct cm_device *cm_dev;
810
811 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
812 if (!cm_dev)
813 return;
808 814
809 spin_lock_irqsave(&cm.lock, flags); 815 spin_lock_irqsave(&cm.lock, flags);
810 cm_cleanup_timewait(cm_id_priv->timewait_info); 816 cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
818 */ 824 */
819 cm_id_priv->id.state = IB_CM_TIMEWAIT; 825 cm_id_priv->id.state = IB_CM_TIMEWAIT;
820 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); 826 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
821 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, 827
822 msecs_to_jiffies(wait_time)); 828 /* Check if the device started its remove_one */
829 spin_lock_irq(&cm.lock);
830 if (!cm_dev->going_down)
831 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
832 msecs_to_jiffies(wait_time));
833 spin_unlock_irq(&cm.lock);
834
823 cm_id_priv->timewait_info = NULL; 835 cm_id_priv->timewait_info = NULL;
824} 836}
825 837
@@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
3305 struct cm_work *work; 3317 struct cm_work *work;
3306 unsigned long flags; 3318 unsigned long flags;
3307 int ret = 0; 3319 int ret = 0;
3320 struct cm_device *cm_dev;
3321
3322 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3323 if (!cm_dev)
3324 return -ENODEV;
3308 3325
3309 work = kmalloc(sizeof *work, GFP_ATOMIC); 3326 work = kmalloc(sizeof *work, GFP_ATOMIC);
3310 if (!work) 3327 if (!work)
@@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
3343 work->remote_id = cm_id->remote_id; 3360 work->remote_id = cm_id->remote_id;
3344 work->mad_recv_wc = NULL; 3361 work->mad_recv_wc = NULL;
3345 work->cm_event.event = IB_CM_USER_ESTABLISHED; 3362 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3346 queue_delayed_work(cm.wq, &work->work, 0); 3363
3364 /* Check if the device started its remove_one */
3365 spin_lock_irq(&cm.lock);
3366 if (!cm_dev->going_down) {
3367 queue_delayed_work(cm.wq, &work->work, 0);
3368 } else {
3369 kfree(work);
3370 ret = -ENODEV;
3371 }
3372 spin_unlock_irq(&cm.lock);
3373
3347out: 3374out:
3348 return ret; 3375 return ret;
3349} 3376}
@@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3394 enum ib_cm_event_type event; 3421 enum ib_cm_event_type event;
3395 u16 attr_id; 3422 u16 attr_id;
3396 int paths = 0; 3423 int paths = 0;
3424 int going_down = 0;
3397 3425
3398 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { 3426 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3399 case CM_REQ_ATTR_ID: 3427 case CM_REQ_ATTR_ID:
@@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3452 work->cm_event.event = event; 3480 work->cm_event.event = event;
3453 work->mad_recv_wc = mad_recv_wc; 3481 work->mad_recv_wc = mad_recv_wc;
3454 work->port = port; 3482 work->port = port;
3455 queue_delayed_work(cm.wq, &work->work, 0); 3483
3484 /* Check if the device started its remove_one */
3485 spin_lock_irq(&cm.lock);
3486 if (!port->cm_dev->going_down)
3487 queue_delayed_work(cm.wq, &work->work, 0);
3488 else
3489 going_down = 1;
3490 spin_unlock_irq(&cm.lock);
3491
3492 if (going_down) {
3493 kfree(work);
3494 ib_free_recv_mad(mad_recv_wc);
3495 }
3456} 3496}
3457 3497
3458static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, 3498static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
3771 3811
3772 cm_dev->ib_device = ib_device; 3812 cm_dev->ib_device = ib_device;
3773 cm_get_ack_delay(cm_dev); 3813 cm_get_ack_delay(cm_dev);
3774 3814 cm_dev->going_down = 0;
3775 cm_dev->device = device_create(&cm_class, &ib_device->dev, 3815 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3776 MKDEV(0, 0), NULL, 3816 MKDEV(0, 0), NULL,
3777 "%s", ib_device->name); 3817 "%s", ib_device->name);
@@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
3864 list_del(&cm_dev->list); 3904 list_del(&cm_dev->list);
3865 write_unlock_irqrestore(&cm.device_lock, flags); 3905 write_unlock_irqrestore(&cm.device_lock, flags);
3866 3906
3907 spin_lock_irq(&cm.lock);
3908 cm_dev->going_down = 1;
3909 spin_unlock_irq(&cm.lock);
3910
3867 for (i = 1; i <= ib_device->phys_port_cnt; i++) { 3911 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3868 if (!rdma_cap_ib_cm(ib_device, i)) 3912 if (!rdma_cap_ib_cm(ib_device, i))
3869 continue; 3913 continue;
3870 3914
3871 port = cm_dev->port[i-1]; 3915 port = cm_dev->port[i-1];
3872 ib_modify_port(ib_device, port->port_num, 0, &port_modify); 3916 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3873 ib_unregister_mad_agent(port->mad_agent); 3917 /*
3918 * We flush the queue here after the going_down set, this
3919 * verify that no new works will be queued in the recv handler,
3920 * after that we can call the unregister_mad_agent
3921 */
3874 flush_workqueue(cm.wq); 3922 flush_workqueue(cm.wq);
3923 ib_unregister_mad_agent(port->mad_agent);
3875 cm_remove_port_fs(port); 3924 cm_remove_port_fs(port);
3876 } 3925 }
3877 device_unregister(cm_dev->device); 3926 device_unregister(cm_dev->device);
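
Note: the cm.c changes above introduce a going_down flag that remove_one sets under cm.lock; every path that wants to queue new work checks the flag under the same lock, so once the flag is set and the workqueue is flushed, no new work can appear. A userspace sketch of that shutdown guard (a pthread mutex stands in for the spinlock, and the work queueing is only a comment):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool going_down;

    static bool try_queue_work(void)
    {
        bool queued = false;

        pthread_mutex_lock(&lock);
        if (!going_down)
            queued = true;      /* queue_delayed_work(...) would go here */
        pthread_mutex_unlock(&lock);
        return queued;
    }

    static void remove_one(void)
    {
        pthread_mutex_lock(&lock);
        going_down = true;
        pthread_mutex_unlock(&lock);
        /* flush the workqueue here; nothing new can be queued any more */
    }

    int main(void)
    {
        printf("%d\n", try_queue_work());       /* 1 */
        remove_one();
        printf("%d\n", try_queue_work());       /* 0 */
        return 0;
    }
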
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index e6ffa2e66c1a..22a3abee2a54 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
67 err_str = "Invalid port mapper client"; 67 err_str = "Invalid port mapper client";
68 goto pid_query_error; 68 goto pid_query_error;
69 } 69 }
70 if (iwpm_registered_client(nl_client)) 70 if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
71 iwpm_user_pid == IWPM_PID_UNAVAILABLE)
71 return 0; 72 return 0;
72 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client); 73 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
73 if (!skb) { 74 if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
106 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL); 107 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
107 if (ret) { 108 if (ret) {
108 skb = NULL; /* skb is freed in the netlink send-op handling */ 109 skb = NULL; /* skb is freed in the netlink send-op handling */
109 iwpm_set_registered(nl_client, 1);
110 iwpm_user_pid = IWPM_PID_UNAVAILABLE; 110 iwpm_user_pid = IWPM_PID_UNAVAILABLE;
111 err_str = "Unable to send a nlmsg"; 111 err_str = "Unable to send a nlmsg";
112 goto pid_query_error; 112 goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
144 err_str = "Invalid port mapper client"; 144 err_str = "Invalid port mapper client";
145 goto add_mapping_error; 145 goto add_mapping_error;
146 } 146 }
147 if (!iwpm_registered_client(nl_client)) { 147 if (!iwpm_valid_pid())
148 return 0;
149 if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
148 err_str = "Unregistered port mapper client"; 150 err_str = "Unregistered port mapper client";
149 goto add_mapping_error; 151 goto add_mapping_error;
150 } 152 }
151 if (!iwpm_valid_pid())
152 return 0;
153 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client); 153 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
154 if (!skb) { 154 if (!skb) {
155 err_str = "Unable to create a nlmsg"; 155 err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
214 err_str = "Invalid port mapper client"; 214 err_str = "Invalid port mapper client";
215 goto query_mapping_error; 215 goto query_mapping_error;
216 } 216 }
217 if (!iwpm_registered_client(nl_client)) { 217 if (!iwpm_valid_pid())
218 return 0;
219 if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
218 err_str = "Unregistered port mapper client"; 220 err_str = "Unregistered port mapper client";
219 goto query_mapping_error; 221 goto query_mapping_error;
220 } 222 }
221 if (!iwpm_valid_pid())
222 return 0;
223 ret = -ENOMEM; 223 ret = -ENOMEM;
224 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client); 224 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
225 if (!skb) { 225 if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
288 err_str = "Invalid port mapper client"; 288 err_str = "Invalid port mapper client";
289 goto remove_mapping_error; 289 goto remove_mapping_error;
290 } 290 }
291 if (!iwpm_registered_client(nl_client)) { 291 if (!iwpm_valid_pid())
292 return 0;
293 if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
292 err_str = "Unregistered port mapper client"; 294 err_str = "Unregistered port mapper client";
293 goto remove_mapping_error; 295 goto remove_mapping_error;
294 } 296 }
295 if (!iwpm_valid_pid())
296 return 0;
297 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client); 297 skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
298 if (!skb) { 298 if (!skb) {
299 ret = -ENOMEM; 299 ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
388 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", 388 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
389 __func__, iwpm_user_pid); 389 __func__, iwpm_user_pid);
390 if (iwpm_valid_client(nl_client)) 390 if (iwpm_valid_client(nl_client))
391 iwpm_set_registered(nl_client, 1); 391 iwpm_set_registration(nl_client, IWPM_REG_VALID);
392register_pid_response_exit: 392register_pid_response_exit:
393 nlmsg_request->request_done = 1; 393 nlmsg_request->request_done = 1;
394 /* always for found nlmsg_request */ 394 /* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
644{ 644{
645 struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX]; 645 struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
646 const char *msg_type = "Mapping Info response"; 646 const char *msg_type = "Mapping Info response";
647 int iwpm_pid;
648 u8 nl_client; 647 u8 nl_client;
649 char *iwpm_name; 648 char *iwpm_name;
650 u16 iwpm_version; 649 u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
669 __func__, nl_client); 668 __func__, nl_client);
670 return ret; 669 return ret;
671 } 670 }
672 iwpm_set_registered(nl_client, 0); 671 iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
673 atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); 672 atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
673 iwpm_user_pid = cb->nlh->nlmsg_pid;
674 if (!iwpm_mapinfo_available()) 674 if (!iwpm_mapinfo_available())
675 return 0; 675 return 0;
676 iwpm_pid = cb->nlh->nlmsg_pid;
677 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n", 676 pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
678 __func__, iwpm_pid); 677 __func__, iwpm_user_pid);
679 ret = iwpm_send_mapinfo(nl_client, iwpm_pid); 678 ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
680 return ret; 679 return ret;
681} 680}
682EXPORT_SYMBOL(iwpm_mapping_info_cb); 681EXPORT_SYMBOL(iwpm_mapping_info_cb);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a626795bf9c7..5fb089e91353 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -78,6 +78,7 @@ init_exit:
78 mutex_unlock(&iwpm_admin_lock); 78 mutex_unlock(&iwpm_admin_lock);
79 if (!ret) { 79 if (!ret) {
80 iwpm_set_valid(nl_client, 1); 80 iwpm_set_valid(nl_client, 1);
81 iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
81 pr_debug("%s: Mapinfo and reminfo tables are created\n", 82 pr_debug("%s: Mapinfo and reminfo tables are created\n",
82 __func__); 83 __func__);
83 } 84 }
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
106 } 107 }
107 mutex_unlock(&iwpm_admin_lock); 108 mutex_unlock(&iwpm_admin_lock);
108 iwpm_set_valid(nl_client, 0); 109 iwpm_set_valid(nl_client, 0);
110 iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
109 return 0; 111 return 0;
110} 112}
111EXPORT_SYMBOL(iwpm_exit); 113EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
397} 399}
398 400
399/* valid client */ 401/* valid client */
400int iwpm_registered_client(u8 nl_client) 402u32 iwpm_get_registration(u8 nl_client)
401{ 403{
402 return iwpm_admin.reg_list[nl_client]; 404 return iwpm_admin.reg_list[nl_client];
403} 405}
404 406
405/* valid client */ 407/* valid client */
406void iwpm_set_registered(u8 nl_client, int reg) 408void iwpm_set_registration(u8 nl_client, u32 reg)
407{ 409{
408 iwpm_admin.reg_list[nl_client] = reg; 410 iwpm_admin.reg_list[nl_client] = reg;
409} 411}
410 412
413/* valid client */
414u32 iwpm_check_registration(u8 nl_client, u32 reg)
415{
416 return (iwpm_get_registration(nl_client) & reg);
417}
418
411int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr, 419int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
412 struct sockaddr_storage *b_sockaddr) 420 struct sockaddr_storage *b_sockaddr)
413{ 421{
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index ee2d9ff095be..b7b9e194ce81 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -58,6 +58,10 @@
58#define IWPM_PID_UNDEFINED -1 58#define IWPM_PID_UNDEFINED -1
59#define IWPM_PID_UNAVAILABLE -2 59#define IWPM_PID_UNAVAILABLE -2
60 60
61#define IWPM_REG_UNDEF 0x01
62#define IWPM_REG_VALID 0x02
63#define IWPM_REG_INCOMPL 0x04
64
61struct iwpm_nlmsg_request { 65struct iwpm_nlmsg_request {
62 struct list_head inprocess_list; 66 struct list_head inprocess_list;
63 __u32 nlmsg_seq; 67 __u32 nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
88 atomic_t refcount; 92 atomic_t refcount;
89 atomic_t nlmsg_seq; 93 atomic_t nlmsg_seq;
90 int client_list[RDMA_NL_NUM_CLIENTS]; 94 int client_list[RDMA_NL_NUM_CLIENTS];
91 int reg_list[RDMA_NL_NUM_CLIENTS]; 95 u32 reg_list[RDMA_NL_NUM_CLIENTS];
92}; 96};
93 97
94/** 98/**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
159void iwpm_set_valid(u8 nl_client, int valid); 163void iwpm_set_valid(u8 nl_client, int valid);
160 164
161/** 165/**
162 * iwpm_registered_client - Check if the port mapper client is registered 166 * iwpm_check_registration - Check if the client registration
167 * matches the given one
163 * @nl_client: The index of the netlink client 168 * @nl_client: The index of the netlink client
169 * @reg: The given registration type to compare with
164 * 170 *
165 * Call iwpm_register_pid() to register a client 171 * Call iwpm_register_pid() to register a client
172 * Returns true if the client registration matches reg,
173 * otherwise returns false
174 */
175u32 iwpm_check_registration(u8 nl_client, u32 reg);
176
177/**
178 * iwpm_set_registration - Set the client registration
179 * @nl_client: The index of the netlink client
180 * @reg: Registration type to set
166 */ 181 */
167int iwpm_registered_client(u8 nl_client); 182void iwpm_set_registration(u8 nl_client, u32 reg);
168 183
169/** 184/**
170 * iwpm_set_registered - Set the port mapper client to registered or not 185 * iwpm_get_registration
171 * @nl_client: The index of the netlink client 186 * @nl_client: The index of the netlink client
172 * @reg: 1 if registered or 0 if not 187 *
188 * Returns the client registration type
173 */ 189 */
174void iwpm_set_registered(u8 nl_client, int reg); 190u32 iwpm_get_registration(u8 nl_client);
175 191
176/** 192/**
177 * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of 193 * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
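
Note: the iwpm changes above replace the boolean registered flag with distinct bit values (IWPM_REG_UNDEF, IWPM_REG_VALID, IWPM_REG_INCOMPL) tested through a mask, so one query can match either a single state or a set of states. A minimal sketch of that bitmask check (standalone, not the iwpm API):

    #include <stdint.h>
    #include <stdio.h>

    #define REG_UNDEF   0x01
    #define REG_VALID   0x02
    #define REG_INCOMPL 0x04

    static uint32_t reg_state = REG_UNDEF;

    /* true if the current state is one of the states in 'wanted' */
    static int check_registration(uint32_t wanted)
    {
        return (reg_state & wanted) != 0;
    }

    int main(void)
    {
        printf("%d\n", check_registration(REG_VALID));                  /* 0 */
        reg_state = REG_VALID;
        printf("%d\n", check_registration(REG_VALID | REG_INCOMPL));    /* 1 */
        return 0;
    }
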
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a4b1466c1bf6..786fc51bf04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
769 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, 769 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
770 mad_agent_priv->qp_info->port_priv->port_num); 770 mad_agent_priv->qp_info->port_priv->port_num);
771 771
772 if (device->node_type == RDMA_NODE_IB_SWITCH && 772 if (rdma_cap_ib_switch(device) &&
773 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 773 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
774 port_num = send_wr->wr.ud.port_num; 774 port_num = send_wr->wr.ud.port_num;
775 else 775 else
@@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
787 if ((opa_get_smp_direction(opa_smp) 787 if ((opa_get_smp_direction(opa_smp)
788 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == 788 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
789 OPA_LID_PERMISSIVE && 789 OPA_LID_PERMISSIVE &&
790 opa_smi_handle_dr_smp_send(opa_smp, device->node_type, 790 opa_smi_handle_dr_smp_send(opa_smp,
791 rdma_cap_ib_switch(device),
791 port_num) == IB_SMI_DISCARD) { 792 port_num) == IB_SMI_DISCARD) {
792 ret = -EINVAL; 793 ret = -EINVAL;
793 dev_err(&device->dev, "OPA Invalid directed route\n"); 794 dev_err(&device->dev, "OPA Invalid directed route\n");
794 goto out; 795 goto out;
795 } 796 }
796 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); 797 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
797 if (opa_drslid != OPA_LID_PERMISSIVE && 798 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
798 opa_drslid & 0xffff0000) { 799 opa_drslid & 0xffff0000) {
799 ret = -EINVAL; 800 ret = -EINVAL;
800 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", 801 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
810 } else { 811 } else {
811 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == 812 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
812 IB_LID_PERMISSIVE && 813 IB_LID_PERMISSIVE &&
813 smi_handle_dr_smp_send(smp, device->node_type, port_num) == 814 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
814 IB_SMI_DISCARD) { 815 IB_SMI_DISCARD) {
815 ret = -EINVAL; 816 ret = -EINVAL;
816 dev_err(&device->dev, "Invalid directed route\n"); 817 dev_err(&device->dev, "Invalid directed route\n");
@@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
2030 struct ib_smp *smp = (struct ib_smp *)recv->mad; 2031 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2031 2032
2032 if (smi_handle_dr_smp_recv(smp, 2033 if (smi_handle_dr_smp_recv(smp,
2033 port_priv->device->node_type, 2034 rdma_cap_ib_switch(port_priv->device),
2034 port_num, 2035 port_num,
2035 port_priv->device->phys_port_cnt) == 2036 port_priv->device->phys_port_cnt) ==
2036 IB_SMI_DISCARD) 2037 IB_SMI_DISCARD)
@@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
2042 2043
2043 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2044 if (retsmi == IB_SMI_SEND) { /* don't forward */
2044 if (smi_handle_dr_smp_send(smp, 2045 if (smi_handle_dr_smp_send(smp,
2045 port_priv->device->node_type, 2046 rdma_cap_ib_switch(port_priv->device),
2046 port_num) == IB_SMI_DISCARD) 2047 port_num) == IB_SMI_DISCARD)
2047 return IB_SMI_DISCARD; 2048 return IB_SMI_DISCARD;
2048 2049
2049 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) 2050 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2050 return IB_SMI_DISCARD; 2051 return IB_SMI_DISCARD;
2051 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { 2052 } else if (rdma_cap_ib_switch(port_priv->device)) {
2052 /* forward case for switches */ 2053 /* forward case for switches */
2053 memcpy(response, recv, mad_priv_size(response)); 2054 memcpy(response, recv, mad_priv_size(response));
2054 response->header.recv_wc.wc = &response->header.wc; 2055 response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
2115 struct opa_smp *smp = (struct opa_smp *)recv->mad; 2116 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2116 2117
2117 if (opa_smi_handle_dr_smp_recv(smp, 2118 if (opa_smi_handle_dr_smp_recv(smp,
2118 port_priv->device->node_type, 2119 rdma_cap_ib_switch(port_priv->device),
2119 port_num, 2120 port_num,
2120 port_priv->device->phys_port_cnt) == 2121 port_priv->device->phys_port_cnt) ==
2121 IB_SMI_DISCARD) 2122 IB_SMI_DISCARD)
@@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
2127 2128
2128 if (retsmi == IB_SMI_SEND) { /* don't forward */ 2129 if (retsmi == IB_SMI_SEND) { /* don't forward */
2129 if (opa_smi_handle_dr_smp_send(smp, 2130 if (opa_smi_handle_dr_smp_send(smp,
2130 port_priv->device->node_type, 2131 rdma_cap_ib_switch(port_priv->device),
2131 port_num) == IB_SMI_DISCARD) 2132 port_num) == IB_SMI_DISCARD)
2132 return IB_SMI_DISCARD; 2133 return IB_SMI_DISCARD;
2133 2134
@@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
2135 IB_SMI_DISCARD) 2136 IB_SMI_DISCARD)
2136 return IB_SMI_DISCARD; 2137 return IB_SMI_DISCARD;
2137 2138
2138 } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) { 2139 } else if (rdma_cap_ib_switch(port_priv->device)) {
2139 /* forward case for switches */ 2140 /* forward case for switches */
2140 memcpy(response, recv, mad_priv_size(response)); 2141 memcpy(response, recv, mad_priv_size(response));
2141 response->header.recv_wc.wc = &response->header.wc; 2142 response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
2235 goto out; 2236 goto out;
2236 } 2237 }
2237 2238
2238 if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) 2239 if (rdma_cap_ib_switch(port_priv->device))
2239 port_num = wc->port_num; 2240 port_num = wc->port_num;
2240 else 2241 else
2241 port_num = port_priv->port_num; 2242 port_num = port_priv->port_num;
@@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
3297 3298
3298static void ib_mad_init_device(struct ib_device *device) 3299static void ib_mad_init_device(struct ib_device *device)
3299{ 3300{
3300 int start, end, i; 3301 int start, i;
3301 3302
3302 if (device->node_type == RDMA_NODE_IB_SWITCH) { 3303 start = rdma_start_port(device);
3303 start = 0;
3304 end = 0;
3305 } else {
3306 start = 1;
3307 end = device->phys_port_cnt;
3308 }
3309 3304
3310 for (i = start; i <= end; i++) { 3305 for (i = start; i <= rdma_end_port(device); i++) {
3311 if (!rdma_cap_ib_mad(device, i)) 3306 if (!rdma_cap_ib_mad(device, i))
3312 continue; 3307 continue;
3313 3308
@@ -3342,17 +3337,9 @@ error:
3342 3337
3343static void ib_mad_remove_device(struct ib_device *device) 3338static void ib_mad_remove_device(struct ib_device *device)
3344{ 3339{
3345 int start, end, i; 3340 int i;
3346
3347 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3348 start = 0;
3349 end = 0;
3350 } else {
3351 start = 1;
3352 end = device->phys_port_cnt;
3353 }
3354 3341
3355 for (i = start; i <= end; i++) { 3342 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3356 if (!rdma_cap_ib_mad(device, i)) 3343 if (!rdma_cap_ib_mad(device, i))
3357 continue; 3344 continue;
3358 3345
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 1244f02a5c6d..2cb865c7ce7a 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
812 if (!dev) 812 if (!dev)
813 return; 813 return;
814 814
815 if (device->node_type == RDMA_NODE_IB_SWITCH) 815 dev->start_port = rdma_start_port(device);
816 dev->start_port = dev->end_port = 0; 816 dev->end_port = rdma_end_port(device);
817 else {
818 dev->start_port = 1;
819 dev->end_port = device->phys_port_cnt;
820 }
821 817
822 for (i = 0; i <= dev->end_port - dev->start_port; i++) { 818 for (i = 0; i <= dev->end_port - dev->start_port; i++) {
823 if (!rdma_cap_ib_mcast(device, dev->start_port + i)) 819 if (!rdma_cap_ib_mcast(device, dev->start_port + i))
diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h
index 62d91bfa4cb7..3bfab3505a29 100644
--- a/drivers/infiniband/core/opa_smi.h
+++ b/drivers/infiniband/core/opa_smi.h
@@ -39,12 +39,12 @@
39 39
40#include "smi.h" 40#include "smi.h"
41 41
42enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, 42enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
43 int port_num, int phys_port_cnt); 43 int port_num, int phys_port_cnt);
44int opa_smi_get_fwd_port(struct opa_smp *smp); 44int opa_smi_get_fwd_port(struct opa_smp *smp);
45extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp); 45extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
46extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, 46extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
47 u8 node_type, int port_num); 47 bool is_switch, int port_num);
48 48
49/* 49/*
50 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM 50 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0fae85062a65..ca919f429666 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
1156 int s, e, i; 1156 int s, e, i;
1157 int count = 0; 1157 int count = 0;
1158 1158
1159 if (device->node_type == RDMA_NODE_IB_SWITCH) 1159 s = rdma_start_port(device);
1160 s = e = 0; 1160 e = rdma_end_port(device);
1161 else {
1162 s = 1;
1163 e = device->phys_port_cnt;
1164 }
1165 1161
1166 sa_dev = kzalloc(sizeof *sa_dev + 1162 sa_dev = kzalloc(sizeof *sa_dev +
1167 (e - s + 1) * sizeof (struct ib_sa_port), 1163 (e - s + 1) * sizeof (struct ib_sa_port),
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 368a561d1a5d..f19b23817c2b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -41,7 +41,7 @@
41#include "smi.h" 41#include "smi.h"
42#include "opa_smi.h" 42#include "opa_smi.h"
43 43
44static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num, 44static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
45 u8 *hop_ptr, u8 hop_cnt, 45 u8 *hop_ptr, u8 hop_cnt,
46 const u8 *initial_path, 46 const u8 *initial_path,
47 const u8 *return_path, 47 const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
64 64
65 /* C14-9:2 */ 65 /* C14-9:2 */
66 if (*hop_ptr && *hop_ptr < hop_cnt) { 66 if (*hop_ptr && *hop_ptr < hop_cnt) {
67 if (node_type != RDMA_NODE_IB_SWITCH) 67 if (!is_switch)
68 return IB_SMI_DISCARD; 68 return IB_SMI_DISCARD;
69 69
70 /* return_path set when received */ 70 /* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
77 if (*hop_ptr == hop_cnt) { 77 if (*hop_ptr == hop_cnt) {
78 /* return_path set when received */ 78 /* return_path set when received */
79 (*hop_ptr)++; 79 (*hop_ptr)++;
80 return (node_type == RDMA_NODE_IB_SWITCH || 80 return (is_switch ||
81 dr_dlid_is_permissive ? 81 dr_dlid_is_permissive ?
82 IB_SMI_HANDLE : IB_SMI_DISCARD); 82 IB_SMI_HANDLE : IB_SMI_DISCARD);
83 } 83 }
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
96 96
97 /* C14-13:2 */ 97 /* C14-13:2 */
98 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { 98 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
99 if (node_type != RDMA_NODE_IB_SWITCH) 99 if (!is_switch)
100 return IB_SMI_DISCARD; 100 return IB_SMI_DISCARD;
101 101
102 (*hop_ptr)--; 102 (*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
108 if (*hop_ptr == 1) { 108 if (*hop_ptr == 1) {
109 (*hop_ptr)--; 109 (*hop_ptr)--;
110 /* C14-13:3 -- SMPs destined for SM shouldn't be here */ 110 /* C14-13:3 -- SMPs destined for SM shouldn't be here */
111 return (node_type == RDMA_NODE_IB_SWITCH || 111 return (is_switch ||
112 dr_slid_is_permissive ? 112 dr_slid_is_permissive ?
113 IB_SMI_HANDLE : IB_SMI_DISCARD); 113 IB_SMI_HANDLE : IB_SMI_DISCARD);
114 } 114 }
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
127 * Return IB_SMI_DISCARD if the SMP should be discarded 127 * Return IB_SMI_DISCARD if the SMP should be discarded
128 */ 128 */
129enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, 129enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
130 u8 node_type, int port_num) 130 bool is_switch, int port_num)
131{ 131{
132 return __smi_handle_dr_smp_send(node_type, port_num, 132 return __smi_handle_dr_smp_send(is_switch, port_num,
133 &smp->hop_ptr, smp->hop_cnt, 133 &smp->hop_ptr, smp->hop_cnt,
134 smp->initial_path, 134 smp->initial_path,
135 smp->return_path, 135 smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
139} 139}
140 140
141enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp, 141enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
142 u8 node_type, int port_num) 142 bool is_switch, int port_num)
143{ 143{
144 return __smi_handle_dr_smp_send(node_type, port_num, 144 return __smi_handle_dr_smp_send(is_switch, port_num,
145 &smp->hop_ptr, smp->hop_cnt, 145 &smp->hop_ptr, smp->hop_cnt,
146 smp->route.dr.initial_path, 146 smp->route.dr.initial_path,
147 smp->route.dr.return_path, 147 smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
152 OPA_LID_PERMISSIVE); 152 OPA_LID_PERMISSIVE);
153} 153}
154 154
155static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num, 155static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
156 int phys_port_cnt, 156 int phys_port_cnt,
157 u8 *hop_ptr, u8 hop_cnt, 157 u8 *hop_ptr, u8 hop_cnt,
158 const u8 *initial_path, 158 const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
173 173
174 /* C14-9:2 -- intermediate hop */ 174 /* C14-9:2 -- intermediate hop */
175 if (*hop_ptr && *hop_ptr < hop_cnt) { 175 if (*hop_ptr && *hop_ptr < hop_cnt) {
176 if (node_type != RDMA_NODE_IB_SWITCH) 176 if (!is_switch)
177 return IB_SMI_DISCARD; 177 return IB_SMI_DISCARD;
178 178
179 return_path[*hop_ptr] = port_num; 179 return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
188 return_path[*hop_ptr] = port_num; 188 return_path[*hop_ptr] = port_num;
189 /* hop_ptr updated when sending */ 189 /* hop_ptr updated when sending */
190 190
191 return (node_type == RDMA_NODE_IB_SWITCH || 191 return (is_switch ||
192 dr_dlid_is_permissive ? 192 dr_dlid_is_permissive ?
193 IB_SMI_HANDLE : IB_SMI_DISCARD); 193 IB_SMI_HANDLE : IB_SMI_DISCARD);
194 } 194 }
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
208 208
209 /* C14-13:2 */ 209 /* C14-13:2 */
210 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) { 210 if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
211 if (node_type != RDMA_NODE_IB_SWITCH) 211 if (!is_switch)
212 return IB_SMI_DISCARD; 212 return IB_SMI_DISCARD;
213 213
214 /* hop_ptr updated when sending */ 214 /* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
224 return IB_SMI_HANDLE; 224 return IB_SMI_HANDLE;
225 } 225 }
226 /* hop_ptr updated when sending */ 226 /* hop_ptr updated when sending */
227 return (node_type == RDMA_NODE_IB_SWITCH ? 227 return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
228 IB_SMI_HANDLE : IB_SMI_DISCARD);
229 } 228 }
230 229
231 /* C14-13:4 -- hop_ptr = 0 -> give to SM */ 230 /* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
238 * Adjust information for a received SMP 237 * Adjust information for a received SMP
239 * Return IB_SMI_DISCARD if the SMP should be dropped 238 * Return IB_SMI_DISCARD if the SMP should be dropped
240 */ 239 */
241enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, 240enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
242 int port_num, int phys_port_cnt) 241 int port_num, int phys_port_cnt)
243{ 242{
244 return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, 243 return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
245 &smp->hop_ptr, smp->hop_cnt, 244 &smp->hop_ptr, smp->hop_cnt,
246 smp->initial_path, 245 smp->initial_path,
247 smp->return_path, 246 smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
254 * Adjust information for a received SMP 253 * Adjust information for a received SMP
255 * Return IB_SMI_DISCARD if the SMP should be dropped 254 * Return IB_SMI_DISCARD if the SMP should be dropped
256 */ 255 */
257enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type, 256enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
258 int port_num, int phys_port_cnt) 257 int port_num, int phys_port_cnt)
259{ 258{
260 return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt, 259 return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
261 &smp->hop_ptr, smp->hop_cnt, 260 &smp->hop_ptr, smp->hop_cnt,
262 smp->route.dr.initial_path, 261 smp->route.dr.initial_path,
263 smp->route.dr.return_path, 262 smp->route.dr.return_path,
diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h
index aff96bac49b4..33c91c8a16e9 100644
--- a/drivers/infiniband/core/smi.h
+++ b/drivers/infiniband/core/smi.h
@@ -51,12 +51,12 @@ enum smi_forward_action {
51 IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */ 51 IB_SMI_FORWARD /* SMP should be forwarded (for switches only) */
52}; 52};
53 53
54enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type, 54enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
55 int port_num, int phys_port_cnt); 55 int port_num, int phys_port_cnt);
56int smi_get_fwd_port(struct ib_smp *smp); 56int smi_get_fwd_port(struct ib_smp *smp);
57extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp); 57extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
58extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp, 58extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
59 u8 node_type, int port_num); 59 bool is_switch, int port_num);
60 60
61/* 61/*
62 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM 62 * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index ed6b6c85c334..0b84a9cdfe5b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
870 goto err_put; 870 goto err_put;
871 } 871 }
872 872
873 if (device->node_type == RDMA_NODE_IB_SWITCH) { 873 if (rdma_cap_ib_switch(device)) {
874 ret = add_port(device, 0, port_callback); 874 ret = add_port(device, 0, port_callback);
875 if (ret) 875 if (ret)
876 goto err_put; 876 goto err_put;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 62c24b1452b8..009481073644 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
1193 return 0; 1193 return 0;
1194} 1194}
1195 1195
1196static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1196static void ib_ucm_release_dev(struct device *dev) 1197static void ib_ucm_release_dev(struct device *dev)
1197{ 1198{
1198 struct ib_ucm_device *ucm_dev; 1199 struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
1202 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) 1203 if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
1203 clear_bit(ucm_dev->devnum, dev_map); 1204 clear_bit(ucm_dev->devnum, dev_map);
1204 else 1205 else
1205 clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); 1206 clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
1206 kfree(ucm_dev); 1207 kfree(ucm_dev);
1207} 1208}
1208 1209
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
1226static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); 1227static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1227 1228
1228static dev_t overflow_maj; 1229static dev_t overflow_maj;
1229static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
1230static int find_overflow_devnum(void) 1230static int find_overflow_devnum(void)
1231{ 1231{
1232 int ret; 1232 int ret;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ad45469f7582..29b21213ea75 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
1354 /* Acquire mutex's based on pointer comparison to prevent deadlock. */ 1354 /* Acquire mutex's based on pointer comparison to prevent deadlock. */
1355 if (file1 < file2) { 1355 if (file1 < file2) {
1356 mutex_lock(&file1->mut); 1356 mutex_lock(&file1->mut);
1357 mutex_lock(&file2->mut); 1357 mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
1358 } else { 1358 } else {
1359 mutex_lock(&file2->mut); 1359 mutex_lock(&file2->mut);
1360 mutex_lock(&file1->mut); 1360 mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
1361 } 1361 }
1362} 1362}
1363 1363
@@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
1616 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); 1616 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1617 misc_deregister(&ucma_misc); 1617 misc_deregister(&ucma_misc);
1618 idr_destroy(&ctx_idr); 1618 idr_destroy(&ctx_idr);
1619 idr_destroy(&multicast_idr);
1619} 1620}
1620 1621
1621module_init(ucma_init); 1622module_init(ucma_init);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..bbbe0184e592 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
736 /* 736 /*
737 * T3 only supports 32 bits of size. 737 * T3 only supports 32 bits of size.
738 */ 738 */
739 if (sizeof(phys_addr_t) > 4) {
740 pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
741 return ERR_PTR(-ENOTSUPP);
742 }
739 bl.size = 0xffffffff; 743 bl.size = 0xffffffff;
740 bl.addr = 0; 744 bl.addr = 0;
741 kva = 0; 745 kva = 0;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48f07cd..92d518382a9f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
814 printk(KERN_ERR MOD 814 printk(KERN_ERR MOD
815 "Unexpected cqe_status 0x%x for QPID=0x%0x\n", 815 "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
816 CQE_STATUS(&cqe), CQE_QPID(&cqe)); 816 CQE_STATUS(&cqe), CQE_QPID(&cqe));
817 ret = -EINVAL; 817 wc->status = IB_WC_FATAL_ERR;
818 } 818 }
819 } 819 }
820out: 820out:
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 12b5bc23832b..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
226 const struct ib_mad *in_mad = (const struct ib_mad *)in; 226 const struct ib_mad *in_mad = (const struct ib_mad *)in;
227 struct ib_mad *out_mad = (struct ib_mad *)out; 227 struct ib_mad *out_mad = (struct ib_mad *)out;
228 228
229 BUG_ON(in_mad_size != sizeof(*in_mad) || 229 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
230 *out_mad_size != sizeof(*out_mad)); 230 *out_mad_size != sizeof(*out_mad)))
231 return IB_MAD_RESULT_FAILURE;
231 232
232 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) 233 if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
233 return IB_MAD_RESULT_FAILURE; 234 return IB_MAD_RESULT_FAILURE;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
34#include <linux/sched.h> 36#include <linux/sched.h>
35#include <linux/spinlock.h> 37#include <linux/spinlock.h>
36#include <linux/idr.h> 38#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
399 u32 bar0 = 0, bar1 = 0; 401 u32 bar0 = 0, bar1 = 0;
400 402
401#ifdef CONFIG_X86_64 403#ifdef CONFIG_X86_64
402 if (WARN(pat_enabled(), 404 if (pat_enabled()) {
403 "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { 405 pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
404 ret = -ENODEV; 406 ret = -ENODEV;
405 goto bail; 407 goto bail;
406 } 408 }
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 948188e37f95..ad3a926ab3c5 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
1499 const struct ib_mad *in_mad = (const struct ib_mad *)in; 1499 const struct ib_mad *in_mad = (const struct ib_mad *)in;
1500 struct ib_mad *out_mad = (struct ib_mad *)out; 1500 struct ib_mad *out_mad = (struct ib_mad *)out;
1501 1501
1502 BUG_ON(in_mad_size != sizeof(*in_mad) || 1502 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
1503 *out_mad_size != sizeof(*out_mad)); 1503 *out_mad_size != sizeof(*out_mad)))
1504 return IB_MAD_RESULT_FAILURE;
1504 1505
1505 switch (in_mad->mad_hdr.mgmt_class) { 1506 switch (in_mad->mad_hdr.mgmt_class) {
1506 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 1507 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 48253b839a6f..30ba49c4a98c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
2044 2044
2045 spin_lock_init(&idev->qp_table.lock); 2045 spin_lock_init(&idev->qp_table.lock);
2046 spin_lock_init(&idev->lk_table.lock); 2046 spin_lock_init(&idev->lk_table.lock);
2047 idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); 2047 idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
2048 /* Set the prefix to the default value (see ch. 4.1.1) */ 2048 /* Set the prefix to the default value (see ch. 4.1.1) */
2049 idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL); 2049 idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
2050 2050
2051 ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size); 2051 ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
2052 if (ret) 2052 if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 85a50df2f203..68b3dfa922bf 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
860 struct mlx4_ib_dev *dev = to_mdev(ibdev); 860 struct mlx4_ib_dev *dev = to_mdev(ibdev);
861 const struct ib_mad *in_mad = (const struct ib_mad *)in; 861 const struct ib_mad *in_mad = (const struct ib_mad *)in;
862 struct ib_mad *out_mad = (struct ib_mad *)out; 862 struct ib_mad *out_mad = (struct ib_mad *)out;
863 enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
863 864
864 BUG_ON(in_mad_size != sizeof(*in_mad) || 865 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
865 *out_mad_size != sizeof(*out_mad)); 866 *out_mad_size != sizeof(*out_mad)))
867 return IB_MAD_RESULT_FAILURE;
866 868
867 switch (rdma_port_get_link_layer(ibdev, port_num)) { 869 /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
868 case IB_LINK_LAYER_INFINIBAND: 870 * queries, should be called only by VFs and for that specific purpose
869 if (!mlx4_is_slave(dev->dev)) 871 */
870 return ib_process_mad(ibdev, mad_flags, port_num, in_wc, 872 if (link == IB_LINK_LAYER_INFINIBAND) {
871 in_grh, in_mad, out_mad); 873 if (mlx4_is_slave(dev->dev) &&
872 case IB_LINK_LAYER_ETHERNET: 874 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
873 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 875 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
874 in_grh, in_mad, out_mad); 876 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
875 default: 877 in_grh, in_mad, out_mad);
876 return -EINVAL; 878
879 return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
880 in_grh, in_mad, out_mad);
877 } 881 }
882
883 if (link == IB_LINK_LAYER_ETHERNET)
884 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
885 in_grh, in_mad, out_mad);
886
887 return -EINVAL;
878} 888}
879 889
880static void send_handler(struct ib_mad_agent *agent, 890static void send_handler(struct ib_mad_agent *agent,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 067a691ecbed..8be6db816460 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
253 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL; 253 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
254 props->timestamp_mask = 0xFFFFFFFFFFFFULL; 254 props->timestamp_mask = 0xFFFFFFFFFFFFULL;
255 255
256 err = mlx4_get_internal_clock_params(dev->dev, &clock_params); 256 if (!mlx4_is_slave(dev->dev))
257 if (err) 257 err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
258 goto out;
259 258
260 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) { 259 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
261 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
262 resp.response_length += sizeof(resp.hca_core_clock_offset); 260 resp.response_length += sizeof(resp.hca_core_clock_offset);
263 resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP; 261 if (!err && !mlx4_is_slave(dev->dev)) {
262 resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
263 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
264 }
264 } 265 }
265 266
266 if (uhw->outlen) { 267 if (uhw->outlen) {
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2669 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); 2670 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2670 if (!dm) { 2671 if (!dm) {
2671 pr_err("failed to allocate memory for tunneling qp update\n"); 2672 pr_err("failed to allocate memory for tunneling qp update\n");
2672 goto out; 2673 return;
2673 } 2674 }
2674 2675
2675 for (i = 0; i < ports; i++) { 2676 for (i = 0; i < ports; i++) {
2676 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); 2677 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2677 if (!dm[i]) { 2678 if (!dm[i]) {
2678 pr_err("failed to allocate memory for tunneling qp update work struct\n"); 2679 pr_err("failed to allocate memory for tunneling qp update work struct\n");
2679 for (i = 0; i < dev->caps.num_ports; i++) { 2680 while (--i >= 0)
2680 if (dm[i]) 2681 kfree(dm[i]);
2681 kfree(dm[i]);
2682 }
2683 goto out; 2682 goto out;
2684 } 2683 }
2685 }
2686 /* initialize or tear down tunnel QPs for the slave */
2687 for (i = 0; i < ports; i++) {
2688 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); 2684 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2689 dm[i]->port = first_port + i + 1; 2685 dm[i]->port = first_port + i + 1;
2690 dm[i]->slave = slave; 2686 dm[i]->slave = slave;
2691 dm[i]->do_init = do_init; 2687 dm[i]->do_init = do_init;
2692 dm[i]->dev = ibdev; 2688 dm[i]->dev = ibdev;
2693 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); 2689 }
2694 if (!ibdev->sriov.is_going_down) 2690 /* initialize or tear down tunnel QPs for the slave */
2691 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2692 if (!ibdev->sriov.is_going_down) {
2693 for (i = 0; i < ports; i++)
2695 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); 2694 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2696 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); 2695 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2696 } else {
2697 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
2698 for (i = 0; i < ports; i++)
2699 kfree(dm[i]);
2697 } 2700 }
2698out: 2701out:
2699 kfree(dm); 2702 kfree(dm);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 01fc97db45d6..b84d13a487cc 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
68 const struct ib_mad *in_mad = (const struct ib_mad *)in; 68 const struct ib_mad *in_mad = (const struct ib_mad *)in;
69 struct ib_mad *out_mad = (struct ib_mad *)out; 69 struct ib_mad *out_mad = (struct ib_mad *)out;
70 70
71 BUG_ON(in_mad_size != sizeof(*in_mad) || 71 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
72 *out_mad_size != sizeof(*out_mad)); 72 *out_mad_size != sizeof(*out_mad)))
73 return IB_MAD_RESULT_FAILURE;
73 74
74 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 75 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
75 76
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 6b2418b74c99..7c3f2fb44ba5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
209 const struct ib_mad *in_mad = (const struct ib_mad *)in; 209 const struct ib_mad *in_mad = (const struct ib_mad *)in;
210 struct ib_mad *out_mad = (struct ib_mad *)out; 210 struct ib_mad *out_mad = (struct ib_mad *)out;
211 211
212 BUG_ON(in_mad_size != sizeof(*in_mad) || 212 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
213 *out_mad_size != sizeof(*out_mad)); 213 *out_mad_size != sizeof(*out_mad)))
214 return IB_MAD_RESULT_FAILURE;
214 215
215 /* Forward locally generated traps to the SM */ 216 /* Forward locally generated traps to the SM */
216 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 217 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 9047af429906..8a3ad170d790 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1520 int rc = arpindex; 1520 int rc = arpindex;
1521 struct net_device *netdev; 1521 struct net_device *netdev;
1522 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; 1522 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1523 __be32 dst_ipaddr = htonl(dst_ip);
1523 1524
1524 rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); 1525 rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
1525 if (IS_ERR(rt)) { 1526 if (IS_ERR(rt)) {
1526 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", 1527 printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
1527 __func__, dst_ip); 1528 __func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1533 else 1534 else
1534 netdev = nesvnic->netdev; 1535 netdev = nesvnic->netdev;
1535 1536
1536 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev); 1537 neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
1537 1538
1538 rcu_read_lock(); 1539 rcu_read_lock();
1539 if (neigh) { 1540 if (neigh) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 02120d340d50..4713dd7ed764 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
3861 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | 3861 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
3862 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]); 3862 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
3863 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32( 3863 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
3864 (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]); 3864 (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
3865 } else { 3865 } else {
3866 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0; 3866 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
3867 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0; 3867 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..6a36338593cd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_H__ 43#ifndef __OCRDMA_H__
29#define __OCRDMA_H__ 44#define __OCRDMA_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_ABI_H__ 43#ifndef __OCRDMA_ABI_H__
29#define __OCRDMA_ABI_H__ 44#define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 4bafa15708d0..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <net/neighbour.h> 43#include <net/neighbour.h>
29#include <net/netevent.h> 44#include <net/netevent.h>
@@ -215,8 +230,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
215 const struct ib_mad *in_mad = (const struct ib_mad *)in; 230 const struct ib_mad *in_mad = (const struct ib_mad *)in;
216 struct ib_mad *out_mad = (struct ib_mad *)out; 231 struct ib_mad *out_mad = (struct ib_mad *)out;
217 232
218 BUG_ON(in_mad_size != sizeof(*in_mad) || 233 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
219 *out_mad_size != sizeof(*out_mad)); 234 *out_mad_size != sizeof(*out_mad)))
235 return IB_MAD_RESULT_FAILURE;
220 236
221 switch (in_mad->mad_hdr.mgmt_class) { 237 switch (in_mad->mad_hdr.mgmt_class) {
222 case IB_MGMT_CLASS_PERF_MGMT: 238 case IB_MGMT_CLASS_PERF_MGMT:
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_AH_H__ 43#ifndef __OCRDMA_AH_H__
29#define __OCRDMA_AH_H__ 44#define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/sched.h> 43#include <linux/sched.h>
29#include <linux/interrupt.h> 44#include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_HW_H__ 43#ifndef __OCRDMA_HW_H__
29#define __OCRDMA_HW_H__ 44#define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 8a1398b253a2..b119a3413a15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/module.h> 43#include <linux/module.h>
29#include <linux/idr.h> 44#include <linux/idr.h>
@@ -46,7 +61,7 @@
46MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); 61MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
47MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); 62MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
48MODULE_AUTHOR("Emulex Corporation"); 63MODULE_AUTHOR("Emulex Corporation");
49MODULE_LICENSE("GPL"); 64MODULE_LICENSE("Dual BSD/GPL");
50 65
51static LIST_HEAD(ocrdma_dev_list); 66static LIST_HEAD(ocrdma_dev_list);
52static DEFINE_SPINLOCK(ocrdma_devlist_lock); 67static DEFINE_SPINLOCK(ocrdma_devlist_lock);
@@ -696,6 +711,7 @@ static void __exit ocrdma_exit_module(void)
696 ocrdma_unregister_inet6addr_notifier(); 711 ocrdma_unregister_inet6addr_notifier();
697 ocrdma_unregister_inetaddr_notifier(); 712 ocrdma_unregister_inetaddr_notifier();
698 ocrdma_rem_debugfs(); 713 ocrdma_rem_debugfs();
714 idr_destroy(&ocrdma_dev_id);
699} 715}
700 716
701module_init(ocrdma_init_module); 717module_init(ocrdma_init_module);
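
Beyond the relicensing to a dual GPL/BSD header and the switch to MODULE_LICENSE("Dual BSD/GPL"), the ocrdma_main.c hunk adds an idr_destroy() call to the module exit path so the IDR used for device numbering releases its internal caches on unload. A minimal sketch of that IDR lifecycle, with hypothetical names standing in for the driver's own:

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/module.h>

static DEFINE_IDR(example_dev_id);              /* stand-in for ocrdma_dev_id */

static int example_get_id(void *dev)
{
        /* hand out a small integer id for this device */
        return idr_alloc(&example_dev_id, dev, 0, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
        idr_remove(&example_dev_id, id);
}

static void __exit example_exit(void)
{
        /* all ids have been removed; free the IDR's internal nodes */
        idr_destroy(&example_dev_id);
}
module_exit(example_exit);
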
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..80006b24aa11 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_SLI_H__ 43#ifndef __OCRDMA_SLI_H__
29#define __OCRDMA_SLI_H__ 44#define __OCRDMA_SLI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h> 44#include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_STATS_H__ 43#ifndef __OCRDMA_STATS_H__
29#define __OCRDMA_STATS_H__ 44#define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..bc84cd462ecf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
29#include <rdma/ib_verbs.h> 44#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..eaccb2d3cb9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_VERBS_H__ 43#ifndef __OCRDMA_VERBS_H__
29#define __OCRDMA_VERBS_H__ 44#define __OCRDMA_VERBS_H__
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 05e3242d8442..9625e7c438e5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
2412 const struct ib_mad *in_mad = (const struct ib_mad *)in; 2412 const struct ib_mad *in_mad = (const struct ib_mad *)in;
2413 struct ib_mad *out_mad = (struct ib_mad *)out; 2413 struct ib_mad *out_mad = (struct ib_mad *)out;
2414 2414
2415 BUG_ON(in_mad_size != sizeof(*in_mad) || 2415 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
2416 *out_mad_size != sizeof(*out_mad)); 2416 *out_mad_size != sizeof(*out_mad)))
2417 return IB_MAD_RESULT_FAILURE;
2417 2418
2418 switch (in_mad->mad_hdr.mgmt_class) { 2419 switch (in_mad->mad_hdr.mgmt_class) {
2419 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 2420 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
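
The qib_process_mad() hunk above turns a fatal BUG_ON() into WARN_ON_ONCE() plus an error return, so a MAD with an unexpected size logs a one-time warning instead of panicking the host. WARN_ON_ONCE() evaluates to the tested condition, which is what allows it to gate the early return directly; a minimal sketch of the pattern (hypothetical function, same return codes as the hunk):

#include <linux/bug.h>
#include <rdma/ib_mad.h>

static int example_process_mad(size_t in_mad_size, size_t out_mad_size)
{
        /* warn once on a malformed request, then fail it gracefully */
        if (WARN_ON_ONCE(in_mad_size != sizeof(struct ib_mad) ||
                         out_mad_size != sizeof(struct ib_mad)))
                return IB_MAD_RESULT_FAILURE;

        return IB_MAD_RESULT_SUCCESS;
}
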
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bd94b0a6e9e5..79859c4d43c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
239 struct net_device *dev; 239 struct net_device *dev;
240 struct ipoib_neigh *neigh; 240 struct ipoib_neigh *neigh;
241 struct ipoib_path *path; 241 struct ipoib_path *path;
242 struct ipoib_cm_tx_buf *tx_ring; 242 struct ipoib_tx_buf *tx_ring;
243 unsigned tx_head; 243 unsigned tx_head;
244 unsigned tx_tail; 244 unsigned tx_tail;
245 unsigned long flags; 245 unsigned long flags;
@@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
504void ipoib_mcast_dev_down(struct net_device *dev); 504void ipoib_mcast_dev_down(struct net_device *dev);
505void ipoib_mcast_dev_flush(struct net_device *dev); 505void ipoib_mcast_dev_flush(struct net_device *dev);
506 506
507int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
508void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
509 struct ipoib_tx_buf *tx_req);
510
511static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
512 struct ipoib_tx_buf *tx_req)
513{
514 int i, off;
515 struct sk_buff *skb = tx_req->skb;
516 skb_frag_t *frags = skb_shinfo(skb)->frags;
517 int nr_frags = skb_shinfo(skb)->nr_frags;
518 u64 *mapping = tx_req->mapping;
519
520 if (skb_headlen(skb)) {
521 priv->tx_sge[0].addr = mapping[0];
522 priv->tx_sge[0].length = skb_headlen(skb);
523 off = 1;
524 } else
525 off = 0;
526
527 for (i = 0; i < nr_frags; ++i) {
528 priv->tx_sge[i + off].addr = mapping[i + off];
529 priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
530 }
531 priv->tx_wr.num_sge = nr_frags + off;
532}
533
507#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 534#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
508struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev); 535struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
509int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter); 536int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
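
The new ipoib_build_sge() inline and the now-exported ipoib_dma_map_tx()/ipoib_dma_unmap_tx() move scatter/gather list construction into shared code so the connected-mode path can reuse it; this is what lets connected mode keep NETIF_F_SG in the ipoib_main.c hunk further down. A hedged sketch of the expected call order on a send path (assumes the declarations from ipoib.h above; not a verbatim copy of the driver, error handling trimmed):

static int example_xmit(struct ipoib_dev_priv *priv, struct ib_qp *qp,
                        struct ipoib_tx_buf *tx_req, struct sk_buff *skb)
{
        struct ib_send_wr *bad_wr;
        int rc;

        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req)))
                return -EIO;                    /* maps skb head + all frags */

        ipoib_build_sge(priv, tx_req);          /* fills priv->tx_sge[] and num_sge */

        rc = ib_post_send(qp, &priv->tx_wr, &bad_wr);
        if (unlikely(rc))
                ipoib_dma_unmap_tx(priv, tx_req);
        return rc;
}
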
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index cf32a778e7d0..ee39be6ccfb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -694,14 +694,12 @@ repost:
694static inline int post_send(struct ipoib_dev_priv *priv, 694static inline int post_send(struct ipoib_dev_priv *priv,
695 struct ipoib_cm_tx *tx, 695 struct ipoib_cm_tx *tx,
696 unsigned int wr_id, 696 unsigned int wr_id,
697 u64 addr, int len) 697 struct ipoib_tx_buf *tx_req)
698{ 698{
699 struct ib_send_wr *bad_wr; 699 struct ib_send_wr *bad_wr;
700 700
701 priv->tx_sge[0].addr = addr; 701 ipoib_build_sge(priv, tx_req);
702 priv->tx_sge[0].length = len;
703 702
704 priv->tx_wr.num_sge = 1;
705 priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; 703 priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
706 704
707 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); 705 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
710void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) 708void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
711{ 709{
712 struct ipoib_dev_priv *priv = netdev_priv(dev); 710 struct ipoib_dev_priv *priv = netdev_priv(dev);
713 struct ipoib_cm_tx_buf *tx_req; 711 struct ipoib_tx_buf *tx_req;
714 u64 addr;
715 int rc; 712 int rc;
716 713
717 if (unlikely(skb->len > tx->mtu)) { 714 if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
735 */ 732 */
736 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; 733 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
737 tx_req->skb = skb; 734 tx_req->skb = skb;
738 addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); 735
739 if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 736 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
740 ++dev->stats.tx_errors; 737 ++dev->stats.tx_errors;
741 dev_kfree_skb_any(skb); 738 dev_kfree_skb_any(skb);
742 return; 739 return;
743 } 740 }
744 741
745 tx_req->mapping = addr;
746
747 skb_orphan(skb); 742 skb_orphan(skb);
748 skb_dst_drop(skb); 743 skb_dst_drop(skb);
749 744
750 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), 745 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
751 addr, skb->len);
752 if (unlikely(rc)) { 746 if (unlikely(rc)) {
753 ipoib_warn(priv, "post_send failed, error %d\n", rc); 747 ipoib_warn(priv, "post_send failed, error %d\n", rc);
754 ++dev->stats.tx_errors; 748 ++dev->stats.tx_errors;
755 ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); 749 ipoib_dma_unmap_tx(priv, tx_req);
756 dev_kfree_skb_any(skb); 750 dev_kfree_skb_any(skb);
757 } else { 751 } else {
758 dev->trans_start = jiffies; 752 dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
777 struct ipoib_dev_priv *priv = netdev_priv(dev); 771 struct ipoib_dev_priv *priv = netdev_priv(dev);
778 struct ipoib_cm_tx *tx = wc->qp->qp_context; 772 struct ipoib_cm_tx *tx = wc->qp->qp_context;
779 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; 773 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
780 struct ipoib_cm_tx_buf *tx_req; 774 struct ipoib_tx_buf *tx_req;
781 unsigned long flags; 775 unsigned long flags;
782 776
783 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", 777 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
791 785
792 tx_req = &tx->tx_ring[wr_id]; 786 tx_req = &tx->tx_ring[wr_id];
793 787
794 ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); 788 ipoib_dma_unmap_tx(priv, tx_req);
795 789
796 /* FIXME: is this right? Shouldn't we only increment on success? */ 790 /* FIXME: is this right? Shouldn't we only increment on success? */
797 ++dev->stats.tx_packets; 791 ++dev->stats.tx_packets;
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1036 1030
1037 struct ib_qp *tx_qp; 1031 struct ib_qp *tx_qp;
1038 1032
1033 if (dev->features & NETIF_F_SG)
1034 attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
1035
1039 tx_qp = ib_create_qp(priv->pd, &attr); 1036 tx_qp = ib_create_qp(priv->pd, &attr);
1040 if (PTR_ERR(tx_qp) == -EINVAL) { 1037 if (PTR_ERR(tx_qp) == -EINVAL) {
1041 ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", 1038 ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ err_tx:
1170static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) 1167static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1171{ 1168{
1172 struct ipoib_dev_priv *priv = netdev_priv(p->dev); 1169 struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1173 struct ipoib_cm_tx_buf *tx_req; 1170 struct ipoib_tx_buf *tx_req;
1174 unsigned long begin; 1171 unsigned long begin;
1175 1172
1176 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", 1173 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ timeout:
1197 1194
1198 while ((int) p->tx_tail - (int) p->tx_head < 0) { 1195 while ((int) p->tx_tail - (int) p->tx_head < 0) {
1199 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; 1196 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
1200 ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, 1197 ipoib_dma_unmap_tx(priv, tx_req);
1201 DMA_TO_DEVICE);
1202 dev_kfree_skb_any(tx_req->skb); 1198 dev_kfree_skb_any(tx_req->skb);
1203 ++p->tx_tail; 1199 ++p->tx_tail;
1204 netif_tx_lock_bh(p->dev); 1200 netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
1455 spin_unlock_irq(&priv->lock); 1451 spin_unlock_irq(&priv->lock);
1456} 1452}
1457 1453
1458
1459static ssize_t show_mode(struct device *d, struct device_attribute *attr, 1454static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1460 char *buf) 1455 char *buf)
1461{ 1456{
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 63b92cbb29ad..d266667ca9b8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -263,8 +263,7 @@ repost:
263 "for buf %d\n", wr_id); 263 "for buf %d\n", wr_id);
264} 264}
265 265
266static int ipoib_dma_map_tx(struct ib_device *ca, 266int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
267 struct ipoib_tx_buf *tx_req)
268{ 267{
269 struct sk_buff *skb = tx_req->skb; 268 struct sk_buff *skb = tx_req->skb;
270 u64 *mapping = tx_req->mapping; 269 u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ partial_error:
305 return -EIO; 304 return -EIO;
306} 305}
307 306
308static void ipoib_dma_unmap_tx(struct ib_device *ca, 307void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
309 struct ipoib_tx_buf *tx_req) 308 struct ipoib_tx_buf *tx_req)
310{ 309{
311 struct sk_buff *skb = tx_req->skb; 310 struct sk_buff *skb = tx_req->skb;
312 u64 *mapping = tx_req->mapping; 311 u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
314 int off; 313 int off;
315 314
316 if (skb_headlen(skb)) { 315 if (skb_headlen(skb)) {
317 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); 316 ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
317 DMA_TO_DEVICE);
318 off = 1; 318 off = 1;
319 } else 319 } else
320 off = 0; 320 off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
322 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { 322 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
323 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 323 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
324 324
325 ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), 325 ib_dma_unmap_page(priv->ca, mapping[i + off],
326 DMA_TO_DEVICE); 326 skb_frag_size(frag), DMA_TO_DEVICE);
327 } 327 }
328} 328}
329 329
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
389 389
390 tx_req = &priv->tx_ring[wr_id]; 390 tx_req = &priv->tx_ring[wr_id];
391 391
392 ipoib_dma_unmap_tx(priv->ca, tx_req); 392 ipoib_dma_unmap_tx(priv, tx_req);
393 393
394 ++dev->stats.tx_packets; 394 ++dev->stats.tx_packets;
395 dev->stats.tx_bytes += tx_req->skb->len; 395 dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
514 void *head, int hlen) 514 void *head, int hlen)
515{ 515{
516 struct ib_send_wr *bad_wr; 516 struct ib_send_wr *bad_wr;
517 int i, off;
518 struct sk_buff *skb = tx_req->skb; 517 struct sk_buff *skb = tx_req->skb;
519 skb_frag_t *frags = skb_shinfo(skb)->frags;
520 int nr_frags = skb_shinfo(skb)->nr_frags;
521 u64 *mapping = tx_req->mapping;
522 518
523 if (skb_headlen(skb)) { 519 ipoib_build_sge(priv, tx_req);
524 priv->tx_sge[0].addr = mapping[0];
525 priv->tx_sge[0].length = skb_headlen(skb);
526 off = 1;
527 } else
528 off = 0;
529 520
530 for (i = 0; i < nr_frags; ++i) {
531 priv->tx_sge[i + off].addr = mapping[i + off];
532 priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
533 }
534 priv->tx_wr.num_sge = nr_frags + off;
535 priv->tx_wr.wr_id = wr_id; 521 priv->tx_wr.wr_id = wr_id;
536 priv->tx_wr.wr.ud.remote_qpn = qpn; 522 priv->tx_wr.wr.ud.remote_qpn = qpn;
537 priv->tx_wr.wr.ud.ah = address; 523 priv->tx_wr.wr.ud.ah = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
617 ipoib_warn(priv, "post_send failed, error %d\n", rc); 603 ipoib_warn(priv, "post_send failed, error %d\n", rc);
618 ++dev->stats.tx_errors; 604 ++dev->stats.tx_errors;
619 --priv->tx_outstanding; 605 --priv->tx_outstanding;
620 ipoib_dma_unmap_tx(priv->ca, tx_req); 606 ipoib_dma_unmap_tx(priv, tx_req);
621 dev_kfree_skb_any(skb); 607 dev_kfree_skb_any(skb);
622 if (netif_queue_stopped(dev)) 608 if (netif_queue_stopped(dev))
623 netif_wake_queue(dev); 609 netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
868 while ((int) priv->tx_tail - (int) priv->tx_head < 0) { 854 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
869 tx_req = &priv->tx_ring[priv->tx_tail & 855 tx_req = &priv->tx_ring[priv->tx_tail &
870 (ipoib_sendq_size - 1)]; 856 (ipoib_sendq_size - 1)];
871 ipoib_dma_unmap_tx(priv->ca, tx_req); 857 ipoib_dma_unmap_tx(priv, tx_req);
872 dev_kfree_skb_any(tx_req->skb); 858 dev_kfree_skb_any(tx_req->skb);
873 ++priv->tx_tail; 859 ++priv->tx_tail;
874 --priv->tx_outstanding; 860 --priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
985} 971}
986 972
987static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, 973static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
988 enum ipoib_flush_level level) 974 enum ipoib_flush_level level,
975 int nesting)
989{ 976{
990 struct ipoib_dev_priv *cpriv; 977 struct ipoib_dev_priv *cpriv;
991 struct net_device *dev = priv->dev; 978 struct net_device *dev = priv->dev;
992 int result; 979 int result;
993 980
994 down_read(&priv->vlan_rwsem); 981 down_read_nested(&priv->vlan_rwsem, nesting);
995 982
996 /* 983 /*
997 * Flush any child interfaces too -- they might be up even if 984 * Flush any child interfaces too -- they might be up even if
998 * the parent is down. 985 * the parent is down.
999 */ 986 */
1000 list_for_each_entry(cpriv, &priv->child_intfs, list) 987 list_for_each_entry(cpriv, &priv->child_intfs, list)
1001 __ipoib_ib_dev_flush(cpriv, level); 988 __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
1002 989
1003 up_read(&priv->vlan_rwsem); 990 up_read(&priv->vlan_rwsem);
1004 991
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
1076 struct ipoib_dev_priv *priv = 1063 struct ipoib_dev_priv *priv =
1077 container_of(work, struct ipoib_dev_priv, flush_light); 1064 container_of(work, struct ipoib_dev_priv, flush_light);
1078 1065
1079 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); 1066 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
1080} 1067}
1081 1068
1082void ipoib_ib_dev_flush_normal(struct work_struct *work) 1069void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
1084 struct ipoib_dev_priv *priv = 1071 struct ipoib_dev_priv *priv =
1085 container_of(work, struct ipoib_dev_priv, flush_normal); 1072 container_of(work, struct ipoib_dev_priv, flush_normal);
1086 1073
1087 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); 1074 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
1088} 1075}
1089 1076
1090void ipoib_ib_dev_flush_heavy(struct work_struct *work) 1077void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
1092 struct ipoib_dev_priv *priv = 1079 struct ipoib_dev_priv *priv =
1093 container_of(work, struct ipoib_dev_priv, flush_heavy); 1080 container_of(work, struct ipoib_dev_priv, flush_heavy);
1094 1081
1095 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); 1082 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
1096} 1083}
1097 1084
1098void ipoib_ib_dev_cleanup(struct net_device *dev) 1085void ipoib_ib_dev_cleanup(struct net_device *dev)
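
__ipoib_ib_dev_flush() now carries a nesting depth and takes vlan_rwsem with down_read_nested(), so lockdep treats the parent-then-child acquisition as an ordered chain of subclasses rather than a recursive deadlock on a single lock class. A minimal sketch of the annotation pattern (hypothetical structure, same idea as the hunk):

#include <linux/rwsem.h>
#include <linux/list.h>

struct example_priv {
        struct rw_semaphore vlan_rwsem;
        struct list_head child_intfs;
        struct list_head list;
};

static void example_flush(struct example_priv *priv, int nesting)
{
        struct example_priv *cpriv;

        down_read_nested(&priv->vlan_rwsem, nesting);   /* one subclass per level */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                example_flush(cpriv, nesting + 1);
        up_read(&priv->vlan_rwsem);
}
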
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index da149c278cb8..b2943c84a5dd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
190 struct ipoib_dev_priv *priv = netdev_priv(dev); 190 struct ipoib_dev_priv *priv = netdev_priv(dev);
191 191
192 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) 192 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
193 features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO); 193 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
194 194
195 return features; 195 return features;
196} 196}
@@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
232 ipoib_warn(priv, "enabling connected mode " 232 ipoib_warn(priv, "enabling connected mode "
233 "will cause multicast packet drops\n"); 233 "will cause multicast packet drops\n");
234 netdev_update_features(dev); 234 netdev_update_features(dev);
235 dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
235 rtnl_unlock(); 236 rtnl_unlock();
236 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; 237 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
237 238
@@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
1577 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1578 SET_NETDEV_DEV(priv->dev, hca->dma_device);
1578 priv->dev->dev_id = port - 1; 1579 priv->dev->dev_id = port - 1;
1579 1580
1580 if (!ib_query_port(hca, port, &attr)) 1581 result = ib_query_port(hca, port, &attr);
1582 if (!result)
1581 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 1583 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
1582 else { 1584 else {
1583 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 1585 printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
1598 goto device_init_failed; 1600 goto device_init_failed;
1599 } 1601 }
1600 1602
1601 if (ipoib_set_dev_features(priv, hca)) 1603 result = ipoib_set_dev_features(priv, hca);
1604 if (result)
1602 goto device_init_failed; 1605 goto device_init_failed;
1603 1606
1604 /* 1607 /*
@@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
1684 struct list_head *dev_list; 1687 struct list_head *dev_list;
1685 struct net_device *dev; 1688 struct net_device *dev;
1686 struct ipoib_dev_priv *priv; 1689 struct ipoib_dev_priv *priv;
1687 int s, e, p; 1690 int p;
1688 int count = 0; 1691 int count = 0;
1689 1692
1690 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); 1693 dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
1693 1696
1694 INIT_LIST_HEAD(dev_list); 1697 INIT_LIST_HEAD(dev_list);
1695 1698
1696 if (device->node_type == RDMA_NODE_IB_SWITCH) { 1699 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
1697 s = 0;
1698 e = 0;
1699 } else {
1700 s = 1;
1701 e = device->phys_port_cnt;
1702 }
1703
1704 for (p = s; p <= e; ++p) {
1705 if (!rdma_protocol_ib(device, p)) 1700 if (!rdma_protocol_ib(device, p))
1706 continue; 1701 continue;
1707 dev = ipoib_add_port("ib%d", device, p); 1702 dev = ipoib_add_port("ib%d", device, p);
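
ipoib_add_one() above, like srp_add_one() later in this diff, drops the RDMA_NODE_IB_SWITCH special case and walks ports with rdma_start_port()/rdma_end_port(), which encode the "switches expose port 0, HCAs expose ports 1..phys_port_cnt" rule in one place. The resulting iteration, sketched in isolation (helper names are the ones used in the hunks above):

#include <rdma/ib_verbs.h>

static void example_add_one(struct ib_device *device)
{
        int p;

        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                if (!rdma_protocol_ib(device, p))
                        continue;               /* skip non-IB (e.g. iWARP) ports */
                /* ... bring up this IB port ... */
        }
}
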
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..851c8219d501 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
177 else 177 else
178 size += ipoib_recvq_size * ipoib_max_conn_qp; 178 size += ipoib_recvq_size * ipoib_max_conn_qp;
179 } else 179 } else
180 goto out_free_wq; 180 if (ret != -ENOSYS)
181 goto out_free_wq;
181 182
182 cq_attr.cqe = size; 183 cq_attr.cqe = size;
183 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, 184 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..d851e1828d6f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
775 ret = isert_rdma_post_recvl(isert_conn); 775 ret = isert_rdma_post_recvl(isert_conn);
776 if (ret) 776 if (ret)
777 goto out_conn_dev; 777 goto out_conn_dev;
778 /*
779 * Obtain the second reference now before isert_rdma_accept() to
780 * ensure that any initiator generated REJECT CM event that occurs
781 * asynchronously won't drop the last reference until the error path
782 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
783 * isert_free_conn() -> isert_put_conn() -> kref_put().
784 */
785 if (!kref_get_unless_zero(&isert_conn->kref)) {
786 isert_warn("conn %p connect_release is running\n", isert_conn);
787 goto out_conn_dev;
788 }
778 789
779 ret = isert_rdma_accept(isert_conn); 790 ret = isert_rdma_accept(isert_conn);
780 if (ret) 791 if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
836 847
837 isert_info("conn %p\n", isert_conn); 848 isert_info("conn %p\n", isert_conn);
838 849
839 if (!kref_get_unless_zero(&isert_conn->kref)) {
840 isert_warn("conn %p connect_release is running\n", isert_conn);
841 return;
842 }
843
844 mutex_lock(&isert_conn->mutex); 850 mutex_lock(&isert_conn->mutex);
845 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 851 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
846 isert_conn->state = ISER_CONN_UP; 852 isert_conn->state = ISER_CONN_UP;
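
The isert hunks move the extra connection reference from isert_connected_handler() up into isert_connect_request(), taking it with kref_get_unless_zero() before isert_rdma_accept() can race with an initiator-generated REJECT dropping the last reference. kref_get_unless_zero() only succeeds while the count is still nonzero, which is exactly the guarantee needed when the final put may already be in flight; a sketch of the pattern with a hypothetical object:

#include <linux/kref.h>
#include <linux/printk.h>
#include <linux/types.h>

struct example_conn {
        struct kref kref;
};

static bool example_conn_get(struct example_conn *conn)
{
        /* refuse the reference if teardown already owns the last one */
        if (!kref_get_unless_zero(&conn->kref)) {
                pr_warn("conn %p is being released\n", conn);
                return false;
        }
        return true;
}
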
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 267dc4f75502..31a20b462266 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
161{ 161{
162 int tmo, res; 162 int tmo, res;
163 163
164 if (strncmp(val, "off", 3) != 0) { 164 res = srp_parse_tmo(&tmo, val);
165 res = kstrtoint(val, 0, &tmo); 165 if (res)
166 if (res) 166 goto out;
167 goto out; 167
168 } else {
169 tmo = -1;
170 }
171 if (kp->arg == &srp_reconnect_delay) 168 if (kp->arg == &srp_reconnect_delay)
172 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo, 169 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
173 srp_dev_loss_tmo); 170 srp_dev_loss_tmo);
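
srp_tmo_set() now defers the "off"-vs-integer parsing to srp_parse_tmo(), a small helper added elsewhere in this patch and not visible in the hunk. Judging from the code it replaces, the helper presumably looks roughly like the following reconstruction (not the verbatim patch):

#include <linux/kernel.h>
#include <linux/string.h>

static int srp_parse_tmo(int *tmo, const char *val)
{
        int ret = 0;

        if (strncmp(val, "off", 3) != 0)
                ret = kstrtoint(val, 0, tmo);   /* numeric timeout in seconds */
        else
                *tmo = -1;                      /* "off" disables the timeout */

        return ret;
}
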
@@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
3379 struct srp_device *srp_dev; 3376 struct srp_device *srp_dev;
3380 struct ib_device_attr *dev_attr; 3377 struct ib_device_attr *dev_attr;
3381 struct srp_host *host; 3378 struct srp_host *host;
3382 int mr_page_shift, s, e, p; 3379 int mr_page_shift, p;
3383 u64 max_pages_per_mr; 3380 u64 max_pages_per_mr;
3384 3381
3385 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL); 3382 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
3443 if (IS_ERR(srp_dev->mr)) 3440 if (IS_ERR(srp_dev->mr))
3444 goto err_pd; 3441 goto err_pd;
3445 3442
3446 if (device->node_type == RDMA_NODE_IB_SWITCH) { 3443 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3447 s = 0;
3448 e = 0;
3449 } else {
3450 s = 1;
3451 e = device->phys_port_cnt;
3452 }
3453
3454 for (p = s; p <= e; ++p) {
3455 host = srp_add_port(srp_dev, p); 3444 host = srp_add_port(srp_dev, p);
3456 if (host) 3445 if (host)
3457 list_add_tail(&host->list, &srp_dev->dev_list); 3446 list_add_tail(&host->list, &srp_dev->dev_list);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 82897ca17f32..60ff0a2390e5 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
302 int i; 302 int i;
303 303
304 ioui = (struct ib_dm_iou_info *)mad->data; 304 ioui = (struct ib_dm_iou_info *)mad->data;
305 ioui->change_id = __constant_cpu_to_be16(1); 305 ioui->change_id = cpu_to_be16(1);
306 ioui->max_controllers = 16; 306 ioui->max_controllers = 16;
307 307
308 /* set present for slot 1 and empty for the rest */ 308 /* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
330 330
331 if (!slot || slot > 16) { 331 if (!slot || slot > 16) {
332 mad->mad_hdr.status 332 mad->mad_hdr.status
333 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); 333 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
334 return; 334 return;
335 } 335 }
336 336
337 if (slot > 2) { 337 if (slot > 2) {
338 mad->mad_hdr.status 338 mad->mad_hdr.status
339 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); 339 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
340 return; 340 return;
341 } 341 }
342 342
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
348 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver); 348 iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
349 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id); 349 iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
350 iocp->subsys_device_id = 0x0; 350 iocp->subsys_device_id = 0x0;
351 iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS); 351 iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
352 iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS); 352 iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
353 iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL); 353 iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
354 iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION); 354 iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
355 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size); 355 iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
356 iocp->rdma_read_depth = 4; 356 iocp->rdma_read_depth = 4;
357 iocp->send_size = cpu_to_be32(srp_max_req_size); 357 iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
379 379
380 if (!slot || slot > 16) { 380 if (!slot || slot > 16) {
381 mad->mad_hdr.status 381 mad->mad_hdr.status
382 = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD); 382 = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
383 return; 383 return;
384 } 384 }
385 385
386 if (slot > 2 || lo > hi || hi > 1) { 386 if (slot > 2 || lo > hi || hi > 1) {
387 mad->mad_hdr.status 387 mad->mad_hdr.status
388 = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC); 388 = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
389 return; 389 return;
390 } 390 }
391 391
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
436 break; 436 break;
437 default: 437 default:
438 rsp_mad->mad_hdr.status = 438 rsp_mad->mad_hdr.status =
439 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); 439 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
440 break; 440 break;
441 } 441 }
442} 442}
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
493 break; 493 break;
494 case IB_MGMT_METHOD_SET: 494 case IB_MGMT_METHOD_SET:
495 dm_mad->mad_hdr.status = 495 dm_mad->mad_hdr.status =
496 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR); 496 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
497 break; 497 break;
498 default: 498 default:
499 dm_mad->mad_hdr.status = 499 dm_mad->mad_hdr.status =
500 __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD); 500 cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
501 break; 501 break;
502 } 502 }
503 503
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
1535 memset(srp_rsp, 0, sizeof *srp_rsp); 1535 memset(srp_rsp, 0, sizeof *srp_rsp);
1536 srp_rsp->opcode = SRP_RSP; 1536 srp_rsp->opcode = SRP_RSP;
1537 srp_rsp->req_lim_delta = 1537 srp_rsp->req_lim_delta =
1538 __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0)); 1538 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1539 srp_rsp->tag = tag; 1539 srp_rsp->tag = tag;
1540 srp_rsp->status = status; 1540 srp_rsp->status = status;
1541 1541
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
1585 memset(srp_rsp, 0, sizeof *srp_rsp); 1585 memset(srp_rsp, 0, sizeof *srp_rsp);
1586 1586
1587 srp_rsp->opcode = SRP_RSP; 1587 srp_rsp->opcode = SRP_RSP;
1588 srp_rsp->req_lim_delta = __constant_cpu_to_be32(1 1588 srp_rsp->req_lim_delta =
1589 + atomic_xchg(&ch->req_lim_delta, 0)); 1589 cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
1590 srp_rsp->tag = tag; 1590 srp_rsp->tag = tag;
1591 1591
1592 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID; 1592 srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
1630 switch (len) { 1630 switch (len) {
1631 case 8: 1631 case 8:
1632 if ((*((__be64 *)lun) & 1632 if ((*((__be64 *)lun) &
1633 __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0) 1633 cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
1634 goto out_err; 1634 goto out_err;
1635 break; 1635 break;
1636 case 4: 1636 case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2449 } 2449 }
2450 2450
2451 if (it_iu_len > srp_max_req_size || it_iu_len < 64) { 2451 if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
2452 rej->reason = __constant_cpu_to_be32( 2452 rej->reason = cpu_to_be32(
2453 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE); 2453 SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
2454 ret = -EINVAL; 2454 ret = -EINVAL;
2455 pr_err("rejected SRP_LOGIN_REQ because its" 2455 pr_err("rejected SRP_LOGIN_REQ because its"
2456 " length (%d bytes) is out of range (%d .. %d)\n", 2456 " length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2459 } 2459 }
2460 2460
2461 if (!sport->enabled) { 2461 if (!sport->enabled) {
2462 rej->reason = __constant_cpu_to_be32( 2462 rej->reason = cpu_to_be32(
2463 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2463 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2464 ret = -EINVAL; 2464 ret = -EINVAL;
2465 pr_err("rejected SRP_LOGIN_REQ because the target port" 2465 pr_err("rejected SRP_LOGIN_REQ because the target port"
2466 " has not yet been enabled\n"); 2466 " has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2505 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid) 2505 if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
2506 || *(__be64 *)(req->target_port_id + 8) != 2506 || *(__be64 *)(req->target_port_id + 8) !=
2507 cpu_to_be64(srpt_service_guid)) { 2507 cpu_to_be64(srpt_service_guid)) {
2508 rej->reason = __constant_cpu_to_be32( 2508 rej->reason = cpu_to_be32(
2509 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL); 2509 SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
2510 ret = -ENOMEM; 2510 ret = -ENOMEM;
2511 pr_err("rejected SRP_LOGIN_REQ because it" 2511 pr_err("rejected SRP_LOGIN_REQ because it"
2512 " has an invalid target port identifier.\n"); 2512 " has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2515 2515
2516 ch = kzalloc(sizeof *ch, GFP_KERNEL); 2516 ch = kzalloc(sizeof *ch, GFP_KERNEL);
2517 if (!ch) { 2517 if (!ch) {
2518 rej->reason = __constant_cpu_to_be32( 2518 rej->reason = cpu_to_be32(
2519 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2519 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2520 pr_err("rejected SRP_LOGIN_REQ because no memory.\n"); 2520 pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
2521 ret = -ENOMEM; 2521 ret = -ENOMEM;
2522 goto reject; 2522 goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2552 2552
2553 ret = srpt_create_ch_ib(ch); 2553 ret = srpt_create_ch_ib(ch);
2554 if (ret) { 2554 if (ret) {
2555 rej->reason = __constant_cpu_to_be32( 2555 rej->reason = cpu_to_be32(
2556 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2556 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2557 pr_err("rejected SRP_LOGIN_REQ because creating" 2557 pr_err("rejected SRP_LOGIN_REQ because creating"
2558 " a new RDMA channel failed.\n"); 2558 " a new RDMA channel failed.\n");
2559 goto free_ring; 2559 goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2561 2561
2562 ret = srpt_ch_qp_rtr(ch, ch->qp); 2562 ret = srpt_ch_qp_rtr(ch, ch->qp);
2563 if (ret) { 2563 if (ret) {
2564 rej->reason = __constant_cpu_to_be32( 2564 rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2565 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2566 pr_err("rejected SRP_LOGIN_REQ because enabling" 2565 pr_err("rejected SRP_LOGIN_REQ because enabling"
2567 " RTR failed (error code = %d)\n", ret); 2566 " RTR failed (error code = %d)\n", ret);
2568 goto destroy_ib; 2567 goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2580 if (!nacl) { 2579 if (!nacl) {
2581 pr_info("Rejected login because no ACL has been" 2580 pr_info("Rejected login because no ACL has been"
2582 " configured yet for initiator %s.\n", ch->sess_name); 2581 " configured yet for initiator %s.\n", ch->sess_name);
2583 rej->reason = __constant_cpu_to_be32( 2582 rej->reason = cpu_to_be32(
2584 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED); 2583 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
2585 goto destroy_ib; 2584 goto destroy_ib;
2586 } 2585 }
2587 2586
2588 ch->sess = transport_init_session(TARGET_PROT_NORMAL); 2587 ch->sess = transport_init_session(TARGET_PROT_NORMAL);
2589 if (IS_ERR(ch->sess)) { 2588 if (IS_ERR(ch->sess)) {
2590 rej->reason = __constant_cpu_to_be32( 2589 rej->reason = cpu_to_be32(
2591 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES); 2590 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
2592 pr_debug("Failed to create session\n"); 2591 pr_debug("Failed to create session\n");
2593 goto deregister_session; 2592 goto deregister_session;
2594 } 2593 }
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
2604 rsp->max_it_iu_len = req->req_it_iu_len; 2603 rsp->max_it_iu_len = req->req_it_iu_len;
2605 rsp->max_ti_iu_len = req->req_it_iu_len; 2604 rsp->max_ti_iu_len = req->req_it_iu_len;
2606 ch->max_ti_iu_len = it_iu_len; 2605 ch->max_ti_iu_len = it_iu_len;
2607 rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT 2606 rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2608 | SRP_BUF_FORMAT_INDIRECT); 2607 | SRP_BUF_FORMAT_INDIRECT);
2609 rsp->req_lim_delta = cpu_to_be32(ch->rq_size); 2608 rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
2610 atomic_set(&ch->req_lim, ch->rq_size); 2609 atomic_set(&ch->req_lim, ch->rq_size);
2611 atomic_set(&ch->req_lim_delta, 0); 2610 atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ free_ch:
2655reject: 2654reject:
2656 rej->opcode = SRP_LOGIN_REJ; 2655 rej->opcode = SRP_LOGIN_REJ;
2657 rej->tag = req->tag; 2656 rej->tag = req->tag;
2658 rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT 2657 rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
2659 | SRP_BUF_FORMAT_INDIRECT); 2658 | SRP_BUF_FORMAT_INDIRECT);
2660 2659
2661 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2660 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2662 (void *)rej, sizeof *rej); 2661 (void *)rej, sizeof *rej);
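
The ib_srpt.c changes are a mechanical cleanup: __constant_cpu_to_beNN() becomes plain cpu_to_beNN(), which already folds to a compile-time constant for constant arguments (the byteswap helpers check __builtin_constant_p internally), so the __constant_ spellings add nothing and checkpatch warns about them. Both initializers in this small illustration still evaluate at compile time (the 0x0100 literal is a stand-in; SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES is the scsi/srp.h constant used in the hunks above):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <scsi/srp.h>

static const __be16 example_io_class = cpu_to_be16(0x0100);
static const __be32 example_rej_reason =
        cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
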
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
71{ 71{
72} 72}
73 73
74static int input_leds_get_count(struct input_dev *dev)
75{
76 unsigned int led_code;
77 int count = 0;
78
79 for_each_set_bit(led_code, dev->ledbit, LED_CNT)
80 if (input_led_info[led_code].name)
81 count++;
82
83 return count;
84}
85
74static int input_leds_connect(struct input_handler *handler, 86static int input_leds_connect(struct input_handler *handler,
75 struct input_dev *dev, 87 struct input_dev *dev,
76 const struct input_device_id *id) 88 const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
81 int led_no; 93 int led_no;
82 int error; 94 int error;
83 95
84 num_leds = bitmap_weight(dev->ledbit, LED_CNT); 96 num_leds = input_leds_get_count(dev);
85 if (!num_leds) 97 if (!num_leds)
86 return -ENXIO; 98 return -ENXIO;
87 99
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
112 led->handle = &leds->handle; 124 led->handle = &leds->handle;
113 led->code = led_code; 125 led->code = led_code;
114 126
115 if (WARN_ON(!input_led_info[led_code].name)) 127 if (!input_led_info[led_code].name)
116 continue; 128 continue;
117 129
118 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", 130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
196 if (n_buttons[i] < 1) 196 if (n_buttons[i] < 1)
197 continue; 197 continue;
198 198
199 if (n_buttons[i] > 6) { 199 if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
200 printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); 200 printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
201 err = -EINVAL; 201 err = -EINVAL;
202 goto err_unreg_devs; 202 goto err_unreg_devs;
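The turbografx change simply ties the bounds check to the button table itself instead of a hard-coded 6. A small illustrative sketch of the pattern (the table contents here are hypothetical, not the driver's actual tgfx_buttons[]):

#include <linux/kernel.h>       /* ARRAY_SIZE */
#include <linux/input.h>        /* BTN_* codes */

static const short example_buttons[] = {
        BTN_TRIGGER, BTN_THUMB, BTN_THUMB2, BTN_TOP, BTN_TOP2, BTN_BASE,
};

static bool example_n_buttons_ok(int n_buttons)
{
        /* stays correct if example_buttons[] ever grows or shrinks */
        return n_buttons >= 1 && n_buttons <= ARRAY_SIZE(example_buttons);
}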
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..c6dc644aa580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
246 * convert it to descriptor. 246 * convert it to descriptor.
247 */ 247 */
248 if (!button->gpiod && gpio_is_valid(button->gpio)) { 248 if (!button->gpiod && gpio_is_valid(button->gpio)) {
249 unsigned flags = 0; 249 unsigned flags = GPIOF_IN;
250 250
251 if (button->active_low) 251 if (button->active_low)
252 flags |= GPIOF_ACTIVE_LOW; 252 flags |= GPIOF_ACTIVE_LOW;
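For context, the flags assembled in this hunk feed the legacy GPIO request a few lines further down (the driver uses devm_gpio_request_one()); without GPIOF_IN the line is requested with no direction configured, so reads are undefined on some controllers. A condensed sketch of the combined request, assuming that helper:

#include <linux/gpio.h>         /* devm_gpio_request_one, GPIOF_* */

static int example_request_button_gpio(struct device *dev, unsigned gpio,
                                       bool active_low, const char *label)
{
        unsigned long flags = GPIOF_IN;         /* always configure as input */

        if (active_low)
                flags |= GPIOF_ACTIVE_LOW;

        /* request and set direction in one step */
        return devm_gpio_request_one(dev, gpio, flags, label);
}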
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
292MODULE_DESCRIPTION("axp20x Power Button"); 292MODULE_DESCRIPTION("axp20x Power Button");
293MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 293MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
294MODULE_LICENSE("GPL"); 294MODULE_LICENSE("GPL");
295MODULE_ALIAS("platform:axp20x-pek");
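The one-line addition matters for autoloading: the axp20x MFD core instantiates a child platform device named "axp20x-pek", and its uevent carries MODALIAS=platform:axp20x-pek; without the alias the module is only loaded when requested by name. A minimal sketch of the pairing (probe body omitted, identifiers hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_pek_probe(struct platform_device *pdev)
{
        return 0;       /* the real driver sets up the IRQ-backed input device */
}

static struct platform_driver example_pek_driver = {
        .probe  = example_pek_probe,
        .driver = { .name = "axp20x-pek" },
};
module_platform_driver(example_pek_driver);

/* lets udev load the module from the platform device's MODALIAS uevent */
MODULE_ALIAS("platform:axp20x-pek");
MODULE_LICENSE("GPL");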
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index e5d60ecd29a4..f5c9cf2f4073 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -313,14 +313,14 @@ static void drv260x_close(struct input_dev *input)
313 gpiod_set_value(haptics->enable_gpio, 0); 313 gpiod_set_value(haptics->enable_gpio, 0);
314} 314}
315 315
316static const struct reg_default drv260x_lra_cal_regs[] = { 316static const struct reg_sequence drv260x_lra_cal_regs[] = {
317 { DRV260X_MODE, DRV260X_AUTO_CAL }, 317 { DRV260X_MODE, DRV260X_AUTO_CAL },
318 { DRV260X_CTRL3, DRV260X_NG_THRESH_2 }, 318 { DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
319 { DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE | 319 { DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
320 DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH }, 320 DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
321}; 321};
322 322
323static const struct reg_default drv260x_lra_init_regs[] = { 323static const struct reg_sequence drv260x_lra_init_regs[] = {
324 { DRV260X_MODE, DRV260X_RT_PLAYBACK }, 324 { DRV260X_MODE, DRV260X_RT_PLAYBACK },
325 { DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS | 325 { DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
326 DRV260X_AUDIO_HAPTICS_FILTER_125HZ }, 326 DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
@@ -337,7 +337,7 @@ static const struct reg_default drv260x_lra_init_regs[] = {
337 { DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS }, 337 { DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
338}; 338};
339 339
340static const struct reg_default drv260x_erm_cal_regs[] = { 340static const struct reg_sequence drv260x_erm_cal_regs[] = {
341 { DRV260X_MODE, DRV260X_AUTO_CAL }, 341 { DRV260X_MODE, DRV260X_AUTO_CAL },
342 { DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT }, 342 { DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
343 { DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT }, 343 { DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
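These tables (and the drv2665/drv2667 ones below) are consumed by regmap's multi-write helpers, which take struct reg_sequence rather than struct reg_default: the latter describes register-cache defaults, while reg_sequence is an ordered write list that can also carry a per-write delay. A minimal sketch of the intended use, with hypothetical register values:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence example_init_regs[] = {
        { .reg = 0x01, .def = 0x40 },
        /* optional settle time after this particular write */
        { .reg = 0x02, .def = 0x85, .delay_us = 100 },
};

static int example_apply_init(struct regmap *regmap)
{
        return regmap_multi_reg_write(regmap, example_init_regs,
                                      ARRAY_SIZE(example_init_regs));
}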
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index 0afaa33de07d..924456e3ca75 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -132,7 +132,7 @@ static void drv2665_close(struct input_dev *input)
132 "Failed to enter standby mode: %d\n", error); 132 "Failed to enter standby mode: %d\n", error);
133} 133}
134 134
135static const struct reg_default drv2665_init_regs[] = { 135static const struct reg_sequence drv2665_init_regs[] = {
136 { DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT }, 136 { DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT },
137 { DRV2665_CTRL_1, DRV2665_25_VPP_GAIN }, 137 { DRV2665_CTRL_1, DRV2665_25_VPP_GAIN },
138}; 138};
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index fc0fddf0896a..047136aa646f 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -262,14 +262,14 @@ static void drv2667_close(struct input_dev *input)
262 "Failed to enter standby mode: %d\n", error); 262 "Failed to enter standby mode: %d\n", error);
263} 263}
264 264
265static const struct reg_default drv2667_init_regs[] = { 265static const struct reg_sequence drv2667_init_regs[] = {
266 { DRV2667_CTRL_2, 0 }, 266 { DRV2667_CTRL_2, 0 },
267 { DRV2667_CTRL_1, DRV2667_25_VPP_GAIN }, 267 { DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
268 { DRV2667_WV_SEQ_0, 1 }, 268 { DRV2667_WV_SEQ_0, 1 },
269 { DRV2667_WV_SEQ_1, 0 } 269 { DRV2667_WV_SEQ_1, 0 }
270}; 270};
271 271
272static const struct reg_default drv2667_page1_init[] = { 272static const struct reg_sequence drv2667_page1_init[] = {
273 { DRV2667_RAM_HDR_SZ, 0x05 }, 273 { DRV2667_RAM_HDR_SZ, 0x05 },
274 { DRV2667_RAM_START_HI, 0x80 }, 274 { DRV2667_RAM_START_HI, 0x80 },
275 { DRV2667_RAM_START_LO, 0x06 }, 275 { DRV2667_RAM_START_LO, 0x06 },
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
183 if (pdata && pdata->coexist) 183 if (pdata && pdata->coexist)
184 return true; 184 return true;
185 185
186 if (of_find_node_by_name(node, "codec")) { 186 node = of_find_node_by_name(node, "codec");
187 if (node) {
187 of_node_put(node); 188 of_node_put(node);
188 return true; 189 return true;
189 } 190 }
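The twl4030-vibra change is a reference-count fix: of_find_node_by_name() returns the matched node with its refcount raised (and drops the reference on the node it was started from), so the node that must be put is the returned "codec" node, not the search root as the old code did. The idiom, sketched with error handling trimmed:

#include <linux/of.h>

static bool example_has_codec_child(struct device_node *parent)
{
        struct device_node *codec;

        /* returns "codec" with an extra reference; also puts 'parent' */
        codec = of_find_node_by_name(parent, "codec");
        if (!codec)
                return false;

        of_node_put(codec);     /* drop the reference we were handed */
        return true;
}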
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
20#include <linux/input/mt.h> 20#include <linux/input/mt.h>
21#include <linux/serio.h> 21#include <linux/serio.h>
22#include <linux/libps2.h> 22#include <linux/libps2.h>
23#include <linux/dmi.h>
23 24
24#include "psmouse.h" 25#include "psmouse.h"
25#include "alps.h" 26#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 100#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 101#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
101 6-byte ALPS packet */ 102 6-byte ALPS packet */
103#define ALPS_DELL 0x100 /* device is a Dell laptop */
102#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 104#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
103 105
104static const struct alps_model_info alps_model_data[] = { 106static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
251 return; 253 return;
252 } 254 }
253 255
254 /* Non interleaved V2 dualpoint has separate stick button bits */ 256 /* Dell non interleaved V2 dualpoint has separate stick button bits */
255 if (priv->proto_version == ALPS_PROTO_V2 && 257 if (priv->proto_version == ALPS_PROTO_V2 &&
256 priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { 258 priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
257 left |= packet[0] & 1; 259 left |= packet[0] & 1;
258 right |= packet[0] & 2; 260 right |= packet[0] & 2;
259 middle |= packet[0] & 4; 261 middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
2550 priv->byte0 = protocol->byte0; 2552 priv->byte0 = protocol->byte0;
2551 priv->mask0 = protocol->mask0; 2553 priv->mask0 = protocol->mask0;
2552 priv->flags = protocol->flags; 2554 priv->flags = protocol->flags;
2555 if (dmi_name_in_vendors("Dell"))
2556 priv->flags |= ALPS_DELL;
2553 2557
2554 priv->x_max = 2000; 2558 priv->x_max = 2000;
2555 priv->y_max = 1400; 2559 priv->y_max = 1400;
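ALPS_DELL is derived from DMI rather than from the touchpad itself because the same ALPS device IDs ship in Dell and non-Dell laptops, but only the Dell units use the separate stick-button bits decoded in the v1/v2 packet handler above. A minimal sketch of the DMI check (this assumes a DMI-capable platform; the flag value mirrors the driver's ALPS_DELL):

#include <linux/dmi.h>

static void example_apply_dell_quirk(unsigned int *flags)
{
        /* matches "Dell" against the DMI BIOS/system/board vendor strings */
        if (dmi_name_in_vendors("Dell"))
                *flags |= 0x100;        /* ALPS_DELL */
}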
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver 2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
3 * 3 *
4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) 4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
5 * Copyright (C) 2015 John Horan (knasher@gmail.com)
5 * 6 *
6 * The USB initialization and package decoding was made by 7 * The USB initialization and package decoding was made by
7 * Scott Shawcroft as part of the touchd user-space driver project: 8 * Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
91#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 92#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
92#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 93#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
93#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 94#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
95/* MacbookPro12,1 (2015) */
96#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
97#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
98#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
94 99
95#define BCM5974_DEVICE(prod) { \ 100#define BCM5974_DEVICE(prod) { \
96 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 101 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
152 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), 157 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
153 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), 158 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
154 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 159 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
160 /* MacbookPro12,1 */
161 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
162 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
163 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
155 /* Terminating entry */ 164 /* Terminating entry */
156 {} 165 {}
157}; 166};
@@ -180,21 +189,47 @@ struct bt_data {
180enum tp_type { 189enum tp_type {
181 TYPE1, /* plain trackpad */ 190 TYPE1, /* plain trackpad */
182 TYPE2, /* button integrated in trackpad */ 191 TYPE2, /* button integrated in trackpad */
183 TYPE3 /* additional header fields since June 2013 */ 192 TYPE3, /* additional header fields since June 2013 */
193 TYPE4 /* additional header field for pressure data */
184}; 194};
185 195
186/* trackpad finger data offsets, le16-aligned */ 196/* trackpad finger data offsets, le16-aligned */
187#define FINGER_TYPE1 (13 * sizeof(__le16)) 197#define HEADER_TYPE1 (13 * sizeof(__le16))
188#define FINGER_TYPE2 (15 * sizeof(__le16)) 198#define HEADER_TYPE2 (15 * sizeof(__le16))
189#define FINGER_TYPE3 (19 * sizeof(__le16)) 199#define HEADER_TYPE3 (19 * sizeof(__le16))
200#define HEADER_TYPE4 (23 * sizeof(__le16))
190 201
191/* trackpad button data offsets */ 202/* trackpad button data offsets */
203#define BUTTON_TYPE1 0
192#define BUTTON_TYPE2 15 204#define BUTTON_TYPE2 15
193#define BUTTON_TYPE3 23 205#define BUTTON_TYPE3 23
206#define BUTTON_TYPE4 31
194 207
195/* list of device capability bits */ 208/* list of device capability bits */
196#define HAS_INTEGRATED_BUTTON 1 209#define HAS_INTEGRATED_BUTTON 1
197 210
211/* trackpad finger data block size */
212#define FSIZE_TYPE1 (14 * sizeof(__le16))
213#define FSIZE_TYPE2 (14 * sizeof(__le16))
214#define FSIZE_TYPE3 (14 * sizeof(__le16))
215#define FSIZE_TYPE4 (15 * sizeof(__le16))
216
217/* offset from header to finger struct */
218#define DELTA_TYPE1 (0 * sizeof(__le16))
219#define DELTA_TYPE2 (0 * sizeof(__le16))
220#define DELTA_TYPE3 (0 * sizeof(__le16))
221#define DELTA_TYPE4 (1 * sizeof(__le16))
222
223/* usb control message mode switch data */
224#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8
225#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8
226#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8
227#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0
228
229/* Wellspring initialization constants */
230#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
231#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
232
198/* trackpad finger structure, le16-aligned */ 233/* trackpad finger structure, le16-aligned */
199struct tp_finger { 234struct tp_finger {
200 __le16 origin; /* zero when switching track finger */ 235 __le16 origin; /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
207 __le16 orientation; /* 16384 when point, else 15 bit angle */ 242 __le16 orientation; /* 16384 when point, else 15 bit angle */
208 __le16 touch_major; /* touch area, major axis */ 243 __le16 touch_major; /* touch area, major axis */
209 __le16 touch_minor; /* touch area, minor axis */ 244 __le16 touch_minor; /* touch area, minor axis */
210 __le16 unused[3]; /* zeros */ 245 __le16 unused[2]; /* zeros */
246 __le16 pressure; /* pressure on forcetouch touchpad */
211 __le16 multi; /* one finger: varies, more fingers: constant */ 247 __le16 multi; /* one finger: varies, more fingers: constant */
212} __attribute__((packed,aligned(2))); 248} __attribute__((packed,aligned(2)));
213 249
214/* trackpad finger data size, empirically at least ten fingers */ 250/* trackpad finger data size, empirically at least ten fingers */
215#define MAX_FINGERS 16 251#define MAX_FINGERS 16
216#define SIZEOF_FINGER sizeof(struct tp_finger)
217#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER)
218#define MAX_FINGER_ORIENTATION 16384 252#define MAX_FINGER_ORIENTATION 16384
219 253
220/* device-specific parameters */ 254/* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
232 int bt_datalen; /* data length of the button interface */ 266 int bt_datalen; /* data length of the button interface */
233 int tp_ep; /* the endpoint of the trackpad interface */ 267 int tp_ep; /* the endpoint of the trackpad interface */
234 enum tp_type tp_type; /* type of trackpad interface */ 268 enum tp_type tp_type; /* type of trackpad interface */
235 int tp_offset; /* offset to trackpad finger data */ 269 int tp_header; /* bytes in header block */
236 int tp_datalen; /* data length of the trackpad interface */ 270 int tp_datalen; /* data length of the trackpad interface */
271 int tp_button; /* offset to button data */
272 int tp_fsize; /* bytes in single finger block */
273 int tp_delta; /* offset from header to finger struct */
274 int um_size; /* usb control message length */
275 int um_req_val; /* usb control message value */
276 int um_req_idx; /* usb control message index */
277 int um_switch_idx; /* usb control message mode switch index */
278 int um_switch_on; /* usb control message mode switch on */
279 int um_switch_off; /* usb control message mode switch off */
237 struct bcm5974_param p; /* finger pressure limits */ 280 struct bcm5974_param p; /* finger pressure limits */
238 struct bcm5974_param w; /* finger width limits */ 281 struct bcm5974_param w; /* finger width limits */
239 struct bcm5974_param x; /* horizontal limits */ 282 struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
259 int slots[MAX_FINGERS]; /* slot assignments */ 302 int slots[MAX_FINGERS]; /* slot assignments */
260}; 303};
261 304
305/* trackpad finger block data, le16-aligned */
306static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
307{
308 const struct bcm5974_config *c = &dev->cfg;
309 u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
310
311 return (const struct tp_finger *)(f_base + i * c->tp_fsize);
312}
313
314#define DATAFORMAT(type) \
315 type, \
316 HEADER_##type, \
317 HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
318 BUTTON_##type, \
319 FSIZE_##type, \
320 DELTA_##type, \
321 USBMSG_##type
322
262/* logical signal quality */ 323/* logical signal quality */
263#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ 324#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
264#define SN_WIDTH 25 /* width signal-to-noise ratio */ 325#define SN_WIDTH 25 /* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
273 USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 334 USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
274 0, 335 0,
275 0x84, sizeof(struct bt_data), 336 0x84, sizeof(struct bt_data),
276 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, 337 0x81, DATAFORMAT(TYPE1),
277 { SN_PRESSURE, 0, 256 }, 338 { SN_PRESSURE, 0, 256 },
278 { SN_WIDTH, 0, 2048 }, 339 { SN_WIDTH, 0, 2048 },
279 { SN_COORD, -4824, 5342 }, 340 { SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
286 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 347 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
287 0, 348 0,
288 0x84, sizeof(struct bt_data), 349 0x84, sizeof(struct bt_data),
289 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, 350 0x81, DATAFORMAT(TYPE1),
290 { SN_PRESSURE, 0, 256 }, 351 { SN_PRESSURE, 0, 256 },
291 { SN_WIDTH, 0, 2048 }, 352 { SN_WIDTH, 0, 2048 },
292 { SN_COORD, -4824, 4824 }, 353 { SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
299 USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, 360 USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
300 HAS_INTEGRATED_BUTTON, 361 HAS_INTEGRATED_BUTTON,
301 0x84, sizeof(struct bt_data), 362 0x84, sizeof(struct bt_data),
302 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 363 0x81, DATAFORMAT(TYPE2),
303 { SN_PRESSURE, 0, 300 }, 364 { SN_PRESSURE, 0, 300 },
304 { SN_WIDTH, 0, 2048 }, 365 { SN_WIDTH, 0, 2048 },
305 { SN_COORD, -4460, 5166 }, 366 { SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
312 USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, 373 USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
313 HAS_INTEGRATED_BUTTON, 374 HAS_INTEGRATED_BUTTON,
314 0x84, sizeof(struct bt_data), 375 0x84, sizeof(struct bt_data),
315 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 376 0x81, DATAFORMAT(TYPE2),
316 { SN_PRESSURE, 0, 300 }, 377 { SN_PRESSURE, 0, 300 },
317 { SN_WIDTH, 0, 2048 }, 378 { SN_WIDTH, 0, 2048 },
318 { SN_COORD, -4620, 5140 }, 379 { SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
325 USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, 386 USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
326 HAS_INTEGRATED_BUTTON, 387 HAS_INTEGRATED_BUTTON,
327 0x84, sizeof(struct bt_data), 388 0x84, sizeof(struct bt_data),
328 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 389 0x81, DATAFORMAT(TYPE2),
329 { SN_PRESSURE, 0, 300 }, 390 { SN_PRESSURE, 0, 300 },
330 { SN_WIDTH, 0, 2048 }, 391 { SN_WIDTH, 0, 2048 },
331 { SN_COORD, -4616, 5112 }, 392 { SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
338 USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, 399 USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
339 HAS_INTEGRATED_BUTTON, 400 HAS_INTEGRATED_BUTTON,
340 0x84, sizeof(struct bt_data), 401 0x84, sizeof(struct bt_data),
341 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 402 0x81, DATAFORMAT(TYPE2),
342 { SN_PRESSURE, 0, 300 }, 403 { SN_PRESSURE, 0, 300 },
343 { SN_WIDTH, 0, 2048 }, 404 { SN_WIDTH, 0, 2048 },
344 { SN_COORD, -4415, 5050 }, 405 { SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
351 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, 412 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
352 HAS_INTEGRATED_BUTTON, 413 HAS_INTEGRATED_BUTTON,
353 0x84, sizeof(struct bt_data), 414 0x84, sizeof(struct bt_data),
354 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 415 0x81, DATAFORMAT(TYPE2),
355 { SN_PRESSURE, 0, 300 }, 416 { SN_PRESSURE, 0, 300 },
356 { SN_WIDTH, 0, 2048 }, 417 { SN_WIDTH, 0, 2048 },
357 { SN_COORD, -4620, 5140 }, 418 { SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
364 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, 425 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
365 HAS_INTEGRATED_BUTTON, 426 HAS_INTEGRATED_BUTTON,
366 0x84, sizeof(struct bt_data), 427 0x84, sizeof(struct bt_data),
367 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 428 0x81, DATAFORMAT(TYPE2),
368 { SN_PRESSURE, 0, 300 }, 429 { SN_PRESSURE, 0, 300 },
369 { SN_WIDTH, 0, 2048 }, 430 { SN_WIDTH, 0, 2048 },
370 { SN_COORD, -4750, 5280 }, 431 { SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
377 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, 438 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
378 HAS_INTEGRATED_BUTTON, 439 HAS_INTEGRATED_BUTTON,
379 0x84, sizeof(struct bt_data), 440 0x84, sizeof(struct bt_data),
380 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 441 0x81, DATAFORMAT(TYPE2),
381 { SN_PRESSURE, 0, 300 }, 442 { SN_PRESSURE, 0, 300 },
382 { SN_WIDTH, 0, 2048 }, 443 { SN_WIDTH, 0, 2048 },
383 { SN_COORD, -4620, 5140 }, 444 { SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
390 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, 451 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
391 HAS_INTEGRATED_BUTTON, 452 HAS_INTEGRATED_BUTTON,
392 0x84, sizeof(struct bt_data), 453 0x84, sizeof(struct bt_data),
393 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 454 0x81, DATAFORMAT(TYPE2),
394 { SN_PRESSURE, 0, 300 }, 455 { SN_PRESSURE, 0, 300 },
395 { SN_WIDTH, 0, 2048 }, 456 { SN_WIDTH, 0, 2048 },
396 { SN_COORD, -4750, 5280 }, 457 { SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
403 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, 464 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
404 HAS_INTEGRATED_BUTTON, 465 HAS_INTEGRATED_BUTTON,
405 0x84, sizeof(struct bt_data), 466 0x84, sizeof(struct bt_data),
406 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 467 0x81, DATAFORMAT(TYPE2),
407 { SN_PRESSURE, 0, 300 }, 468 { SN_PRESSURE, 0, 300 },
408 { SN_WIDTH, 0, 2048 }, 469 { SN_WIDTH, 0, 2048 },
409 { SN_COORD, -4750, 5280 }, 470 { SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
416 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, 477 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
417 HAS_INTEGRATED_BUTTON, 478 HAS_INTEGRATED_BUTTON,
418 0, sizeof(struct bt_data), 479 0, sizeof(struct bt_data),
419 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, 480 0x83, DATAFORMAT(TYPE3),
420 { SN_PRESSURE, 0, 300 }, 481 { SN_PRESSURE, 0, 300 },
421 { SN_WIDTH, 0, 2048 }, 482 { SN_WIDTH, 0, 2048 },
422 { SN_COORD, -4620, 5140 }, 483 { SN_COORD, -4620, 5140 },
423 { SN_COORD, -150, 6600 }, 484 { SN_COORD, -150, 6600 },
424 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } 485 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
425 }, 486 },
487 {
488 USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
489 USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
490 USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
491 HAS_INTEGRATED_BUTTON,
492 0, sizeof(struct bt_data),
493 0x83, DATAFORMAT(TYPE4),
494 { SN_PRESSURE, 0, 300 },
495 { SN_WIDTH, 0, 2048 },
496 { SN_COORD, -4828, 5345 },
497 { SN_COORD, -203, 6803 },
498 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
499 },
426 {} 500 {}
427}; 501};
428 502
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
549 struct input_dev *input = dev->input; 623 struct input_dev *input = dev->input;
550 int raw_n, i, n = 0; 624 int raw_n, i, n = 0;
551 625
552 if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) 626 if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
553 return -EIO; 627 return -EIO;
554 628
555 /* finger data, le16-aligned */ 629 raw_n = (size - c->tp_header) / c->tp_fsize;
556 f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
557 raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
558 630
559 for (i = 0; i < raw_n; i++) { 631 for (i = 0; i < raw_n; i++) {
560 if (raw2int(f[i].touch_major) == 0) 632 f = get_tp_finger(dev, i);
633 if (raw2int(f->touch_major) == 0)
561 continue; 634 continue;
562 dev->pos[n].x = raw2int(f[i].abs_x); 635 dev->pos[n].x = raw2int(f->abs_x);
563 dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); 636 dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
564 dev->index[n++] = &f[i]; 637 dev->index[n++] = f;
565 } 638 }
566 639
567 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); 640 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
572 645
573 input_mt_sync_frame(input); 646 input_mt_sync_frame(input);
574 647
575 report_synaptics_data(input, c, f, raw_n); 648 report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
576 649
577 /* type 2 reports button events via ibt only */ 650 /* later types report button events via integrated button only */
578 if (c->tp_type == TYPE2) { 651 if (c->caps & HAS_INTEGRATED_BUTTON) {
579 int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); 652 int ibt = raw2int(dev->tp_data[c->tp_button]);
580 input_report_key(input, BTN_LEFT, ibt); 653 input_report_key(input, BTN_LEFT, ibt);
581 } 654 }
582 655
583 if (c->tp_type == TYPE3)
584 input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
585
586 input_sync(input); 656 input_sync(input);
587 657
588 return 0; 658 return 0;
589} 659}
590 660
591/* Wellspring initialization constants */
592#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
593#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
594#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
595#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
596#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
597#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
598
599static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) 661static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
600{ 662{
663 const struct bcm5974_config *c = &dev->cfg;
601 int retval = 0, size; 664 int retval = 0, size;
602 char *data; 665 char *data;
603 666
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
605 if (dev->cfg.tp_type == TYPE3) 668 if (dev->cfg.tp_type == TYPE3)
606 return 0; 669 return 0;
607 670
608 data = kmalloc(8, GFP_KERNEL); 671 data = kmalloc(c->um_size, GFP_KERNEL);
609 if (!data) { 672 if (!data) {
610 dev_err(&dev->intf->dev, "out of memory\n"); 673 dev_err(&dev->intf->dev, "out of memory\n");
611 retval = -ENOMEM; 674 retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
616 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 679 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
617 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, 680 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
618 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 681 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
619 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 682 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
620 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
621 683
622 if (size != 8) { 684 if (size != c->um_size) {
623 dev_err(&dev->intf->dev, "could not read from device\n"); 685 dev_err(&dev->intf->dev, "could not read from device\n");
624 retval = -EIO; 686 retval = -EIO;
625 goto out; 687 goto out;
626 } 688 }
627 689
628 /* apply the mode switch */ 690 /* apply the mode switch */
629 data[0] = on ? 691 data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
630 BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
631 BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
632 692
633 /* write configuration */ 693 /* write configuration */
634 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 694 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
635 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, 695 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
636 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 696 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
637 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 697 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
638 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
639 698
640 if (size != 8) { 699 if (size != c->um_size) {
641 dev_err(&dev->intf->dev, "could not write to device\n"); 700 dev_err(&dev->intf->dev, "could not write to device\n");
642 retval = -EIO; 701 retval = -EIO;
643 goto out; 702 goto out;
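The bcm5974 reshuffle replaces one fixed finger-array layout with per-type parameters, and the positional USBMSG_TYPE4 arguments fill the um_* fields in declaration order: a 2-byte control message with wValue 0x302 and wIndex 2, whose mode-switch byte sits at offset 1 and is written as 0x01 when switching raw (wellspring) mode on and 0x00 when switching it off. The finger offsets for the new TYPE4 format work out as follows, using only the defines added above:

/*
 * Worked offsets for TYPE4 (MacbookPro12,1):
 *
 *   HEADER_TYPE4 = 23 * sizeof(__le16) = 46 bytes
 *   DELTA_TYPE4  =  1 * sizeof(__le16) =  2 bytes
 *   FSIZE_TYPE4  = 15 * sizeof(__le16) = 30 bytes
 *
 * so get_tp_finger(dev, i) resolves to
 *
 *   dev->tp_data + 46 + 2 + i * 30          (finger 0 starts at byte 48)
 *
 * and DATAFORMAT(TYPE4) fills in
 *
 *   tp_datalen = HEADER_TYPE4 + MAX_FINGERS * FSIZE_TYPE4
 *              = 46 + 16 * 30 = 526 bytes,
 *   tp_button  = BUTTON_TYPE4 = 31.
 */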
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 62641f2adaf7..5b5f403d8ce6 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -771,7 +771,7 @@ static const struct attribute_group *elan_sysfs_groups[] = {
771 */ 771 */
772static void elan_report_contact(struct elan_tp_data *data, 772static void elan_report_contact(struct elan_tp_data *data,
773 int contact_num, bool contact_valid, 773 int contact_num, bool contact_valid,
774 bool hover_event, u8 *finger_data) 774 u8 *finger_data)
775{ 775{
776 struct input_dev *input = data->input; 776 struct input_dev *input = data->input;
777 unsigned int pos_x, pos_y; 777 unsigned int pos_x, pos_y;
@@ -815,9 +815,7 @@ static void elan_report_contact(struct elan_tp_data *data,
815 input_mt_report_slot_state(input, MT_TOOL_FINGER, true); 815 input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
816 input_report_abs(input, ABS_MT_POSITION_X, pos_x); 816 input_report_abs(input, ABS_MT_POSITION_X, pos_x);
817 input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y); 817 input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y);
818 input_report_abs(input, ABS_MT_DISTANCE, hover_event); 818 input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);
819 input_report_abs(input, ABS_MT_PRESSURE,
820 hover_event ? 0 : scaled_pressure);
821 input_report_abs(input, ABS_TOOL_WIDTH, mk_x); 819 input_report_abs(input, ABS_TOOL_WIDTH, mk_x);
822 input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); 820 input_report_abs(input, ABS_MT_TOUCH_MAJOR, major);
823 input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); 821 input_report_abs(input, ABS_MT_TOUCH_MINOR, minor);
@@ -839,14 +837,14 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
839 hover_event = hover_info & 0x40; 837 hover_event = hover_info & 0x40;
840 for (i = 0; i < ETP_MAX_FINGERS; i++) { 838 for (i = 0; i < ETP_MAX_FINGERS; i++) {
841 contact_valid = tp_info & (1U << (3 + i)); 839 contact_valid = tp_info & (1U << (3 + i));
842 elan_report_contact(data, i, contact_valid, hover_event, 840 elan_report_contact(data, i, contact_valid, finger_data);
843 finger_data);
844 841
845 if (contact_valid) 842 if (contact_valid)
846 finger_data += ETP_FINGER_DATA_LEN; 843 finger_data += ETP_FINGER_DATA_LEN;
847 } 844 }
848 845
849 input_report_key(input, BTN_LEFT, tp_info & 0x01); 846 input_report_key(input, BTN_LEFT, tp_info & 0x01);
847 input_report_abs(input, ABS_DISTANCE, hover_event != 0);
850 input_mt_report_pointer_emulation(input, true); 848 input_mt_report_pointer_emulation(input, true);
851 input_sync(input); 849 input_sync(input);
852} 850}
@@ -922,6 +920,7 @@ static int elan_setup_input_device(struct elan_tp_data *data)
922 input_abs_set_res(input, ABS_Y, data->y_res); 920 input_abs_set_res(input, ABS_Y, data->y_res);
923 input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); 921 input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0);
924 input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0); 922 input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0);
923 input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
925 924
926 /* And MT parameters */ 925 /* And MT parameters */
927 input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0); 926 input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0);
@@ -934,7 +933,6 @@ static int elan_setup_input_device(struct elan_tp_data *data)
934 ETP_FINGER_WIDTH * max_width, 0, 0); 933 ETP_FINGER_WIDTH * max_width, 0, 0);
935 input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 934 input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0,
936 ETP_FINGER_WIDTH * min_width, 0, 0); 935 ETP_FINGER_WIDTH * min_width, 0, 0);
937 input_set_abs_params(input, ABS_MT_DISTANCE, 0, 1, 0, 0);
938 936
939 data->input = input; 937 data->input = input;
940 938
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
783 struct elantech_data *etd = psmouse->private; 783 struct elantech_data *etd = psmouse->private;
784 unsigned char *packet = psmouse->packet; 784 unsigned char *packet = psmouse->packet;
785 unsigned char packet_type = packet[3] & 0x03; 785 unsigned char packet_type = packet[3] & 0x03;
786 unsigned int ic_version;
786 bool sanity_check; 787 bool sanity_check;
787 788
788 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) 789 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
789 return PACKET_TRACKPOINT; 790 return PACKET_TRACKPOINT;
790 791
792 /* This represents the version of IC body. */
793 ic_version = (etd->fw_version & 0x0f0000) >> 16;
794
791 /* 795 /*
792 * Sanity check based on the constant bits of a packet. 796 * Sanity check based on the constant bits of a packet.
793 * The constant bits change depending on the value of 797 * The constant bits change depending on the value of
794 * the hardware flag 'crc_enabled' but are the same for 798 * the hardware flag 'crc_enabled' and the version of
795 * every packet, regardless of the type. 799 * the IC body, but are the same for every packet,
800 * regardless of the type.
796 */ 801 */
797 if (etd->crc_enabled) 802 if (etd->crc_enabled)
798 sanity_check = ((packet[3] & 0x08) == 0x00); 803 sanity_check = ((packet[3] & 0x08) == 0x00);
804 else if (ic_version == 7 && etd->samples[1] == 0x2A)
805 sanity_check = ((packet[3] & 0x1c) == 0x10);
799 else 806 else
800 sanity_check = ((packet[0] & 0x0c) == 0x04 && 807 sanity_check = ((packet[0] & 0x0c) == 0x04 &&
801 (packet[3] & 0x1c) == 0x10); 808 (packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1116 * Avatar AVIU-145A2 0x361f00 ? clickpad 1123 * Avatar AVIU-145A2 0x361f00 ? clickpad
1117 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1124 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1118 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1125 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1126 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1119 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1127 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
1120 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1128 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
1121 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) 1129 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1167,7 +1175,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1167 struct input_dev *dev = psmouse->dev; 1175 struct input_dev *dev = psmouse->dev;
1168 struct elantech_data *etd = psmouse->private; 1176 struct elantech_data *etd = psmouse->private;
1169 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; 1177 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
1170 unsigned int x_res = 0, y_res = 0; 1178 unsigned int x_res = 31, y_res = 31;
1171 1179
1172 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) 1180 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
1173 return -1; 1181 return -1;
@@ -1232,8 +1240,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1232 /* For X to recognize me as touchpad. */ 1240 /* For X to recognize me as touchpad. */
1233 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); 1241 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
1234 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); 1242 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
1235 input_abs_set_res(dev, ABS_X, x_res);
1236 input_abs_set_res(dev, ABS_Y, y_res);
1237 /* 1243 /*
1238 * range of pressure and width is the same as v2, 1244 * range of pressure and width is the same as v2,
1239 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. 1245 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1252,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1246 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); 1252 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
1247 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); 1253 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
1248 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); 1254 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
1249 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1250 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1251 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, 1255 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
1252 ETP_PMAX_V2, 0, 0); 1256 ETP_PMAX_V2, 0, 0);
1253 /* 1257 /*
@@ -1259,6 +1263,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1259 break; 1263 break;
1260 } 1264 }
1261 1265
1266 input_abs_set_res(dev, ABS_X, x_res);
1267 input_abs_set_res(dev, ABS_Y, y_res);
1268 if (etd->hw_version > 1) {
1269 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1270 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1271 }
1272
1262 etd->y_max = y_max; 1273 etd->y_max = y_max;
1263 etd->width = width; 1274 etd->width = width;
1264 1275
@@ -1648,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
1648 etd->capabilities[0], etd->capabilities[1], 1659 etd->capabilities[0], etd->capabilities[1],
1649 etd->capabilities[2]); 1660 etd->capabilities[2]);
1650 1661
1662 if (etd->hw_version != 1) {
1663 if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
1664 psmouse_err(psmouse, "failed to query sample data\n");
1665 goto init_fail;
1666 }
1667 psmouse_info(psmouse,
1668 "Elan sample query result %02x, %02x, %02x\n",
1669 etd->samples[0], etd->samples[1], etd->samples[2]);
1670 }
1671
1651 if (elantech_set_absolute_mode(psmouse)) { 1672 if (elantech_set_absolute_mode(psmouse)) {
1652 psmouse_err(psmouse, 1673 psmouse_err(psmouse,
1653 "failed to put touchpad into absolute mode.\n"); 1674 "failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
129 unsigned char reg_26; 129 unsigned char reg_26;
130 unsigned char debug; 130 unsigned char debug;
131 unsigned char capabilities[3]; 131 unsigned char capabilities[3];
132 unsigned char samples[3];
132 bool paritycheck; 133 bool paritycheck;
133 bool jumpy_cursor; 134 bool jumpy_cursor;
134 bool reports_pressure; 135 bool reports_pressure;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 35c8d0ceabee..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
1199 ABS_MT_POSITION_Y); 1199 ABS_MT_POSITION_Y);
1200 /* Image sensors can report per-contact pressure */ 1200 /* Image sensors can report per-contact pressure */
1201 input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0); 1201 input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
1202 input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK); 1202 input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
1203 1203
1204 /* Image sensors can signal 4 and 5 finger clicks */ 1204 /* Image sensors can signal 4 and 5 finger clicks */
1205 __set_bit(BTN_TOOL_QUADTAP, dev->keybit); 1205 __set_bit(BTN_TOOL_QUADTAP, dev->keybit);
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
1484 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS; 1484 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
1485 1485
1486 psmouse_info(psmouse, 1486 psmouse_info(psmouse,
1487 "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", 1487 "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
1488 SYN_ID_MODEL(priv->identity), 1488 SYN_ID_MODEL(priv->identity),
1489 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), 1489 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
1490 priv->model_id, 1490 priv->model_id,
1491 priv->capabilities, priv->ext_cap, priv->ext_cap_0c, 1491 priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
1492 priv->board_id, priv->firmware_id); 1492 priv->ext_cap_10, priv->board_id, priv->firmware_id);
1493 1493
1494 set_input_params(psmouse, priv); 1494 set_input_params(psmouse, priv);
1495 1495
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..e36162b28c2a 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/dmi.h>
18#include <linux/i2c.h> 19#include <linux/i2c.h>
19#include <linux/input.h> 20#include <linux/input.h>
20#include <linux/input/mt.h> 21#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
34 int abs_y_max; 35 int abs_y_max;
35 unsigned int max_touch_num; 36 unsigned int max_touch_num;
36 unsigned int int_trigger_type; 37 unsigned int int_trigger_type;
38 bool rotated_screen;
37}; 39};
38 40
39#define GOODIX_MAX_HEIGHT 4096 41#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
60 IRQ_TYPE_LEVEL_HIGH, 62 IRQ_TYPE_LEVEL_HIGH,
61}; 63};
62 64
65/*
66 * Those tablets have their coordinates origin at the bottom right
67 * of the tablet, as if rotated 180 degrees
68 */
69static const struct dmi_system_id rotated_screen[] = {
70#if defined(CONFIG_DMI) && defined(CONFIG_X86)
71 {
72 .ident = "WinBook TW100",
73 .matches = {
74 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
75 DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
76 }
77 },
78 {
79 .ident = "WinBook TW700",
80 .matches = {
81 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
82 DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
83 },
84 },
85#endif
86 {}
87};
88
63/** 89/**
64 * goodix_i2c_read - read data from a register of the i2c slave device. 90 * goodix_i2c_read - read data from a register of the i2c slave device.
65 * 91 *
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
129 int input_y = get_unaligned_le16(&coor_data[3]); 155 int input_y = get_unaligned_le16(&coor_data[3]);
130 int input_w = get_unaligned_le16(&coor_data[5]); 156 int input_w = get_unaligned_le16(&coor_data[5]);
131 157
158 if (ts->rotated_screen) {
159 input_x = ts->abs_x_max - input_x;
160 input_y = ts->abs_y_max - input_y;
161 }
162
132 input_mt_slot(ts->input_dev, id); 163 input_mt_slot(ts->input_dev, id);
133 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); 164 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
134 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); 165 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
223 ts->abs_y_max = GOODIX_MAX_HEIGHT; 254 ts->abs_y_max = GOODIX_MAX_HEIGHT;
224 ts->max_touch_num = GOODIX_MAX_CONTACTS; 255 ts->max_touch_num = GOODIX_MAX_CONTACTS;
225 } 256 }
257
258 ts->rotated_screen = dmi_check_system(rotated_screen);
259 if (ts->rotated_screen)
260 dev_dbg(&ts->client->dev,
261 "Applying '180 degrees rotated screen' quirk\n");
226} 262}
227 263
228/** 264/**
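The goodix quirk itself is just a point reflection through the panel centre, applied to each contact before it is reported. A tiny sketch with hypothetical maxima (the driver uses abs_x_max/abs_y_max read from the controller config):

/* e.g. with x_max = 1024, y_max = 600 a raw (100, 50) becomes (924, 550) */
static void example_rotate_180(int *x, int *y, int x_max, int y_max)
{
        *x = x_max - *x;
        *y = y_max - *y;
}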
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
627 goto err_out; 627 goto err_out;
628 } 628 }
629 629
630 /* TSC-25 data sheet specifies a delay after the RESET command */
631 msleep(150);
632
630 /* set coordinate output rate */ 633 /* set coordinate output rate */
631 buf[0] = buf[1] = 0xFF; 634 buf[0] = buf[1] = 0xFF;
632 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), 635 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..658ee39e6569 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
76 * Domain for untranslated devices - only allocated 76 * Domain for untranslated devices - only allocated
77 * if iommu=pt passed on kernel cmd line. 77 * if iommu=pt passed on kernel cmd line.
78 */ 78 */
79static struct protection_domain *pt_domain;
80
81static const struct iommu_ops amd_iommu_ops; 79static const struct iommu_ops amd_iommu_ops;
82 80
83static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 81static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
96 struct protection_domain *domain; /* Domain the device is bound to */ 94 struct protection_domain *domain; /* Domain the device is bound to */
97 u16 devid; /* PCI Device ID */ 95 u16 devid; /* PCI Device ID */
98 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
99 bool passthrough; /* Default for device is pt_domain */ 97 bool passthrough; /* Device is identity mapped */
100 struct { 98 struct {
101 bool enabled; 99 bool enabled;
102 int qdep; 100 int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
116struct kmem_cache *amd_iommu_irq_cache; 114struct kmem_cache *amd_iommu_irq_cache;
117 115
118static void update_domain(struct protection_domain *domain); 116static void update_domain(struct protection_domain *domain);
119static int alloc_passthrough_domain(void);
120static int protection_domain_init(struct protection_domain *domain); 117static int protection_domain_init(struct protection_domain *domain);
121 118
122/**************************************************************************** 119/****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
2167 dev_data = get_dev_data(dev); 2164 dev_data = get_dev_data(dev);
2168 2165
2169 if (domain->flags & PD_IOMMUV2_MASK) { 2166 if (domain->flags & PD_IOMMUV2_MASK) {
2170 if (!dev_data->iommu_v2 || !dev_data->passthrough) 2167 if (!dev_data->passthrough)
2171 return -EINVAL; 2168 return -EINVAL;
2172 2169
2173 if (pdev_iommuv2_enable(pdev) != 0) 2170 if (dev_data->iommu_v2) {
2174 return -EINVAL; 2171 if (pdev_iommuv2_enable(pdev) != 0)
2172 return -EINVAL;
2175 2173
2176 dev_data->ats.enabled = true; 2174 dev_data->ats.enabled = true;
2177 dev_data->ats.qdep = pci_ats_queue_depth(pdev); 2175 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2178 dev_data->pri_tlp = pci_pri_tlp_required(pdev); 2176 dev_data->pri_tlp = pci_pri_tlp_required(pdev);
2177 }
2179 } else if (amd_iommu_iotlb_sup && 2178 } else if (amd_iommu_iotlb_sup &&
2180 pci_enable_ats(pdev, PAGE_SHIFT) == 0) { 2179 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2181 dev_data->ats.enabled = true; 2180 dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
2221 do_detach(head); 2220 do_detach(head);
2222 2221
2223 spin_unlock_irqrestore(&domain->lock, flags); 2222 spin_unlock_irqrestore(&domain->lock, flags);
2224
2225 /*
2226 * If we run in passthrough mode the device must be assigned to the
2227 * passthrough domain if it is detached from any other domain.
2228 * Make sure we can deassign from the pt_domain itself.
2229 */
2230 if (dev_data->passthrough &&
2231 (dev_data->domain == NULL && domain != pt_domain))
2232 __attach_device(dev_data, pt_domain);
2233} 2223}
2234 2224
2235/* 2225/*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
2249 __detach_device(dev_data); 2239 __detach_device(dev_data);
2250 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2240 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2251 2241
2252 if (domain->flags & PD_IOMMUV2_MASK) 2242 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2253 pdev_iommuv2_disable(to_pci_dev(dev)); 2243 pdev_iommuv2_disable(to_pci_dev(dev));
2254 else if (dev_data->ats.enabled) 2244 else if (dev_data->ats.enabled)
2255 pci_disable_ats(to_pci_dev(dev)); 2245 pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
2287 2277
2288 BUG_ON(!dev_data); 2278 BUG_ON(!dev_data);
2289 2279
2290 if (dev_data->iommu_v2) 2280 if (iommu_pass_through || dev_data->iommu_v2)
2291 iommu_request_dm_for_dev(dev); 2281 iommu_request_dm_for_dev(dev);
2292 2282
2293 /* Domains are initialized for this device - have a look what we ended up with */ 2283 /* Domains are initialized for this device - have a look what we ended up with */
2294 domain = iommu_get_domain_for_dev(dev); 2284 domain = iommu_get_domain_for_dev(dev);
2295 if (domain->type == IOMMU_DOMAIN_IDENTITY) { 2285 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2296 dev_data->passthrough = true; 2286 dev_data->passthrough = true;
2297 dev->archdata.dma_ops = &nommu_dma_ops; 2287 else
2298 } else {
2299 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2288 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2300 }
2301 2289
2302out: 2290out:
2303 iommu_completion_wait(iommu); 2291 iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
2862 2850
2863int __init amd_iommu_init_dma_ops(void) 2851int __init amd_iommu_init_dma_ops(void)
2864{ 2852{
2853 swiotlb = iommu_pass_through ? 1 : 0;
2865 iommu_detected = 1; 2854 iommu_detected = 1;
2866 swiotlb = 0; 2855
2856 /*
2857 * In case we don't initialize SWIOTLB (actually the common case
2858 * when AMD IOMMU is enabled), make sure there are global
2859 * dma_ops set as a fall-back for devices not handled by this
2860 * driver (for example non-PCI devices).
2861 */
2862 if (!swiotlb)
2863 dma_ops = &nommu_dma_ops;
2867 2864
2868 amd_iommu_stats_init(); 2865 amd_iommu_stats_init();
2869 2866
@@ -2947,21 +2944,6 @@ out_err:
2947 return NULL; 2944 return NULL;
2948} 2945}
2949 2946
2950static int alloc_passthrough_domain(void)
2951{
2952 if (pt_domain != NULL)
2953 return 0;
2954
2955 /* allocate passthrough domain */
2956 pt_domain = protection_domain_alloc();
2957 if (!pt_domain)
2958 return -ENOMEM;
2959
2960 pt_domain->mode = PAGE_MODE_NONE;
2961
2962 return 0;
2963}
2964
2965static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) 2947static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2966{ 2948{
2967 struct protection_domain *pdomain; 2949 struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
3222 * 3204 *
3223 *****************************************************************************/ 3205 *****************************************************************************/
3224 3206
3225int __init amd_iommu_init_passthrough(void)
3226{
3227 struct iommu_dev_data *dev_data;
3228 struct pci_dev *dev = NULL;
3229 int ret;
3230
3231 ret = alloc_passthrough_domain();
3232 if (ret)
3233 return ret;
3234
3235 for_each_pci_dev(dev) {
3236 if (!check_device(&dev->dev))
3237 continue;
3238
3239 dev_data = get_dev_data(&dev->dev);
3240 dev_data->passthrough = true;
3241
3242 attach_device(&dev->dev, pt_domain);
3243 }
3244
3245 amd_iommu_stats_init();
3246
3247 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3248
3249 return 0;
3250}
3251
3252/* IOMMUv2 specific functions */ 3207/* IOMMUv2 specific functions */
3253int amd_iommu_register_ppr_notifier(struct notifier_block *nb) 3208int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3254{ 3209{
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
3363 struct amd_iommu *iommu; 3318 struct amd_iommu *iommu;
3364 int qdep; 3319 int qdep;
3365 3320
3366 BUG_ON(!dev_data->ats.enabled); 3321 /*
3322 There might be non-IOMMUv2 capable devices in an IOMMUv2
3323 * domain.
3324 */
3325 if (!dev_data->ats.enabled)
3326 continue;
3367 3327
3368 qdep = dev_data->ats.qdep; 3328 qdep = dev_data->ats.qdep;
3369 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3329 iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..a24495eb4e26 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
2026 return true; 2026 return true;
2027} 2027}
2028 2028
2029static int amd_iommu_init_dma(void)
2030{
2031 if (iommu_pass_through)
2032 return amd_iommu_init_passthrough();
2033 else
2034 return amd_iommu_init_dma_ops();
2035}
2036
2037/**************************************************************************** 2029/****************************************************************************
2038 * 2030 *
2039 * AMD IOMMU Initialization State Machine 2031 * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
2073 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2065 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2074 break; 2066 break;
2075 case IOMMU_INTERRUPTS_EN: 2067 case IOMMU_INTERRUPTS_EN:
2076 ret = amd_iommu_init_dma(); 2068 ret = amd_iommu_init_dma_ops();
2077 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2069 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2078 break; 2070 break;
2079 case IOMMU_DMA_OPS: 2071 case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..f7b875bb70d4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
132 132
133static void free_device_state(struct device_state *dev_state) 133static void free_device_state(struct device_state *dev_state)
134{ 134{
135 struct iommu_group *group;
136
135 /* 137 /*
136 * First detach device from domain - No more PRI requests will arrive 138 * First detach device from domain - No more PRI requests will arrive
137 * from that device after it is unbound from the IOMMUv2 domain. 139 * from that device after it is unbound from the IOMMUv2 domain.
138 */ 140 */
139 iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); 141 group = iommu_group_get(&dev_state->pdev->dev);
142 if (WARN_ON(!group))
143 return;
144
145 iommu_detach_group(dev_state->domain, group);
146
147 iommu_group_put(group);
140 148
141 /* Everything is down now, free the IOMMUv2 domain */ 149 /* Everything is down now, free the IOMMUv2 domain */
142 iommu_domain_free(dev_state->domain); 150 iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
731int amd_iommu_init_device(struct pci_dev *pdev, int pasids) 739int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
732{ 740{
733 struct device_state *dev_state; 741 struct device_state *dev_state;
742 struct iommu_group *group;
734 unsigned long flags; 743 unsigned long flags;
735 int ret, tmp; 744 int ret, tmp;
736 u16 devid; 745 u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
776 if (ret) 785 if (ret)
777 goto out_free_domain; 786 goto out_free_domain;
778 787
779 ret = iommu_attach_device(dev_state->domain, &pdev->dev); 788 group = iommu_group_get(&pdev->dev);
780 if (ret != 0) 789 if (!group)
781 goto out_free_domain; 790 goto out_free_domain;
782 791
792 ret = iommu_attach_group(dev_state->domain, group);
793 if (ret != 0)
794 goto out_drop_group;
795
796 iommu_group_put(group);
797
783 spin_lock_irqsave(&state_lock, flags); 798 spin_lock_irqsave(&state_lock, flags);
784 799
785 if (__get_device_state(devid) != NULL) { 800 if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
794 809
795 return 0; 810 return 0;
796 811
812out_drop_group:
813 iommu_group_put(group);
814
797out_free_domain: 815out_free_domain:
798 iommu_domain_free(dev_state->domain); 816 iommu_domain_free(dev_state->domain);
799 817
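
Note: the amd_iommu_v2 hunks above replace per-device attach/detach with group-based
attach. A rough standalone sketch of that pattern (not part of the patch; the example_*
names are illustrative, while iommu_group_get/iommu_attach_group/iommu_detach_group/
iommu_group_put are the real kernel API):

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/bug.h>

/* Attach a private domain through the device's IOMMU group. */
static int example_attach(struct iommu_domain *domain, struct pci_dev *pdev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(&pdev->dev);    /* takes a reference */
        if (!group)
                return -ENODEV;

        ret = iommu_attach_group(domain, group);
        iommu_group_put(group);                 /* drop the reference either way */

        return ret;
}

/* Tear-down mirrors the attach path, as in free_device_state() above. */
static void example_detach(struct iommu_domain *domain, struct pci_dev *pdev)
{
        struct iommu_group *group = iommu_group_get(&pdev->dev);

        if (WARN_ON(!group))
                return;

        iommu_detach_group(domain, group);
        iommu_group_put(group);
}
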
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
199 * Stream table. 199 * Stream table.
200 * 200 *
201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries 201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
202 * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) 202 * 2lvl: 128k L1 entries,
203 * 256 lazy entries per table (each table covers a PCI bus)
203 */ 204 */
204#define STRTAB_L1_SZ_SHIFT 16 205#define STRTAB_L1_SZ_SHIFT 20
205#define STRTAB_SPLIT 8 206#define STRTAB_SPLIT 8
206 207
207#define STRTAB_L1_DESC_DWORDS 1 208#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
269#define ARM64_TCR_TG0_SHIFT 14 270#define ARM64_TCR_TG0_SHIFT 14
270#define ARM64_TCR_TG0_MASK 0x3UL 271#define ARM64_TCR_TG0_MASK 0x3UL
271#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 272#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
272#define ARM64_TCR_IRGN0_SHIFT 24 273#define ARM64_TCR_IRGN0_SHIFT 8
273#define ARM64_TCR_IRGN0_MASK 0x3UL 274#define ARM64_TCR_IRGN0_MASK 0x3UL
274#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 275#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
275#define ARM64_TCR_ORGN0_SHIFT 26 276#define ARM64_TCR_ORGN0_SHIFT 10
276#define ARM64_TCR_ORGN0_MASK 0x3UL 277#define ARM64_TCR_ORGN0_MASK 0x3UL
277#define CTXDESC_CD_0_TCR_SH0_SHIFT 12 278#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
278#define ARM64_TCR_SH0_SHIFT 12 279#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
542#define ARM_SMMU_FEAT_HYP (1 << 12) 543#define ARM_SMMU_FEAT_HYP (1 << 12)
543 u32 features; 544 u32 features;
544 545
546#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
547 u32 options;
548
545 struct arm_smmu_cmdq cmdq; 549 struct arm_smmu_cmdq cmdq;
546 struct arm_smmu_evtq evtq; 550 struct arm_smmu_evtq evtq;
547 struct arm_smmu_priq priq; 551 struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
602static DEFINE_SPINLOCK(arm_smmu_devices_lock); 606static DEFINE_SPINLOCK(arm_smmu_devices_lock);
603static LIST_HEAD(arm_smmu_devices); 607static LIST_HEAD(arm_smmu_devices);
604 608
609struct arm_smmu_option_prop {
610 u32 opt;
611 const char *prop;
612};
613
614static struct arm_smmu_option_prop arm_smmu_options[] = {
615 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
616 { 0, NULL},
617};
618
605static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) 619static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
606{ 620{
607 return container_of(dom, struct arm_smmu_domain, domain); 621 return container_of(dom, struct arm_smmu_domain, domain);
608} 622}
609 623
624static void parse_driver_options(struct arm_smmu_device *smmu)
625{
626 int i = 0;
627
628 do {
629 if (of_property_read_bool(smmu->dev->of_node,
630 arm_smmu_options[i].prop)) {
631 smmu->options |= arm_smmu_options[i].opt;
632 dev_notice(smmu->dev, "option %s\n",
633 arm_smmu_options[i].prop);
634 }
635 } while (arm_smmu_options[++i].opt);
636}
637
610/* Low-level queue manipulation functions */ 638/* Low-level queue manipulation functions */
611static bool queue_full(struct arm_smmu_queue *q) 639static bool queue_full(struct arm_smmu_queue *q)
612{ 640{
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1036 arm_smmu_sync_ste_for_sid(smmu, sid); 1064 arm_smmu_sync_ste_for_sid(smmu, sid);
1037 1065
1038 /* It's likely that we'll want to use the new STE soon */ 1066 /* It's likely that we'll want to use the new STE soon */
1039 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); 1067 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1068 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1040} 1069}
1041 1070
1042static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) 1071static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1064 return 0; 1093 return 0;
1065 1094
1066 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); 1095 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1067 strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; 1096 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1068 1097
1069 desc->span = STRTAB_SPLIT + 1; 1098 desc->span = STRTAB_SPLIT + 1;
1070 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, 1099 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2020{ 2049{
2021 void *strtab; 2050 void *strtab;
2022 u64 reg; 2051 u64 reg;
2023 u32 size; 2052 u32 size, l1size;
2024 int ret; 2053 int ret;
2025 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2054 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2026 2055
2027 /* Calculate the L1 size, capped to the SIDSIZE */ 2056 /* Calculate the L1 size, capped to the SIDSIZE */
2028 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); 2057 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2029 size = min(size, smmu->sid_bits - STRTAB_SPLIT); 2058 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2030 if (size + STRTAB_SPLIT < smmu->sid_bits) 2059 cfg->num_l1_ents = 1 << size;
2060
2061 size += STRTAB_SPLIT;
2062 if (size < smmu->sid_bits)
2031 dev_warn(smmu->dev, 2063 dev_warn(smmu->dev,
2032 "2-level strtab only covers %u/%u bits of SID\n", 2064 "2-level strtab only covers %u/%u bits of SID\n",
2033 size + STRTAB_SPLIT, smmu->sid_bits); 2065 size, smmu->sid_bits);
2034 2066
2035 cfg->num_l1_ents = 1 << size; 2067 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
2036 size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); 2068 strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2037 strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2038 GFP_KERNEL); 2069 GFP_KERNEL);
2039 if (!strtab) { 2070 if (!strtab) {
2040 dev_err(smmu->dev, 2071 dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2055 ret = arm_smmu_init_l1_strtab(smmu); 2086 ret = arm_smmu_init_l1_strtab(smmu);
2056 if (ret) 2087 if (ret)
2057 dma_free_coherent(smmu->dev, 2088 dma_free_coherent(smmu->dev,
2058 cfg->num_l1_ents * 2089 l1size,
2059 (STRTAB_L1_DESC_DWORDS << 3),
2060 strtab, 2090 strtab,
2061 cfg->strtab_dma); 2091 cfg->strtab_dma);
2062 return ret; 2092 return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2573 if (irq > 0) 2603 if (irq > 0)
2574 smmu->gerr_irq = irq; 2604 smmu->gerr_irq = irq;
2575 2605
2606 parse_driver_options(smmu);
2607
2576 /* Probe the h/w */ 2608 /* Probe the h/w */
2577 ret = arm_smmu_device_probe(smmu); 2609 ret = arm_smmu_device_probe(smmu);
2578 if (ret) 2610 if (ret)
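
Note: the arm-smmu-v3 2-level stream-table hunks above cap the L1 size by SIDSIZE, warn
when size + STRTAB_SPLIT still falls short of sid_bits, and reuse one l1size value for
both allocation and the error path. A rough sketch of that arithmetic in isolation (the
helper name and return convention are illustrative, not from the patch):

#include <linux/kernel.h>
#include <linux/log2.h>

#define STRTAB_L1_SZ_SHIFT      20
#define STRTAB_SPLIT            8
#define STRTAB_L1_DESC_DWORDS   1

/* Return the L1 table size in bytes and report how many SID bits it covers. */
static u32 strtab_l1_bytes(u32 sid_bits, u32 *covered_bits)
{
        u32 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
        u32 num_l1_ents;

        size = min(size, sid_bits - STRTAB_SPLIT);
        num_l1_ents = 1 << size;
        *covered_bits = size + STRTAB_SPLIT;

        /* one L1 descriptor is STRTAB_L1_DESC_DWORDS * 8 bytes */
        return num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
}
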
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1830 1830
1831static void domain_exit(struct dmar_domain *domain) 1831static void domain_exit(struct dmar_domain *domain)
1832{ 1832{
1833 struct dmar_drhd_unit *drhd;
1834 struct intel_iommu *iommu;
1833 struct page *freelist = NULL; 1835 struct page *freelist = NULL;
1834 int i;
1835 1836
1836 /* Domain 0 is reserved, so dont process it */ 1837 /* Domain 0 is reserved, so dont process it */
1837 if (!domain) 1838 if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
1851 1852
1852 /* clear attached or cached domains */ 1853 /* clear attached or cached domains */
1853 rcu_read_lock(); 1854 rcu_read_lock();
1854 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) 1855 for_each_active_iommu(iommu, drhd)
1855 iommu_detach_domain(domain, g_iommus[i]); 1856 if (domain_type_is_vm(domain) ||
1857 test_bit(iommu->seq_id, domain->iommu_bmp))
1858 iommu_detach_domain(domain, iommu);
1856 rcu_read_unlock(); 1859 rcu_read_unlock();
1857 1860
1858 dma_free_pagelist(freelist); 1861 dma_free_pagelist(freelist);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..c12bb93334ff 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
68 .irq_mask = irq_chip_mask_parent, 68 .irq_mask = irq_chip_mask_parent,
69 .irq_unmask = irq_chip_unmask_parent, 69 .irq_unmask = irq_chip_unmask_parent,
70 .irq_retrigger = irq_chip_retrigger_hierarchy, 70 .irq_retrigger = irq_chip_retrigger_hierarchy,
71 .irq_set_wake = irq_chip_set_wake_parent, 71 .irq_set_type = irq_chip_set_type_parent,
72 .flags = IRQCHIP_MASK_ON_SUSPEND |
73 IRQCHIP_SKIP_SET_WAKE,
72#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
73 .irq_set_affinity = irq_chip_set_affinity_parent, 75 .irq_set_affinity = irq_chip_set_affinity_parent,
74#endif 76#endif
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1b7e155869f6..c00e2db351ba 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -75,6 +75,13 @@ struct its_node {
75 75
76#define ITS_ITT_ALIGN SZ_256 76#define ITS_ITT_ALIGN SZ_256
77 77
78struct event_lpi_map {
79 unsigned long *lpi_map;
80 u16 *col_map;
81 irq_hw_number_t lpi_base;
82 int nr_lpis;
83};
84
78/* 85/*
79 * The ITS view of a device - belongs to an ITS, a collection, owns an 86 * The ITS view of a device - belongs to an ITS, a collection, owns an
80 * interrupt translation table, and a list of interrupts. 87 * interrupt translation table, and a list of interrupts.
@@ -82,11 +89,8 @@ struct its_node {
82struct its_device { 89struct its_device {
83 struct list_head entry; 90 struct list_head entry;
84 struct its_node *its; 91 struct its_node *its;
85 struct its_collection *collection; 92 struct event_lpi_map event_map;
86 void *itt; 93 void *itt;
87 unsigned long *lpi_map;
88 irq_hw_number_t lpi_base;
89 int nr_lpis;
90 u32 nr_ites; 94 u32 nr_ites;
91 u32 device_id; 95 u32 device_id;
92}; 96};
@@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
99#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 103#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
100#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 104#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
101 105
106static struct its_collection *dev_event_to_col(struct its_device *its_dev,
107 u32 event)
108{
109 struct its_node *its = its_dev->its;
110
111 return its->collections + its_dev->event_map.col_map[event];
112}
113
102/* 114/*
103 * ITS command descriptors - parameters to be encoded in a command 115 * ITS command descriptors - parameters to be encoded in a command
104 * block. 116 * block.
@@ -134,7 +146,7 @@ struct its_cmd_desc {
134 struct { 146 struct {
135 struct its_device *dev; 147 struct its_device *dev;
136 struct its_collection *col; 148 struct its_collection *col;
137 u32 id; 149 u32 event_id;
138 } its_movi_cmd; 150 } its_movi_cmd;
139 151
140 struct { 152 struct {
@@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
241 253
242 its_fixup_cmd(cmd); 254 its_fixup_cmd(cmd);
243 255
244 return desc->its_mapd_cmd.dev->collection; 256 return NULL;
245} 257}
246 258
247static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, 259static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
@@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
260static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, 272static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
261 struct its_cmd_desc *desc) 273 struct its_cmd_desc *desc)
262{ 274{
275 struct its_collection *col;
276
277 col = dev_event_to_col(desc->its_mapvi_cmd.dev,
278 desc->its_mapvi_cmd.event_id);
279
263 its_encode_cmd(cmd, GITS_CMD_MAPVI); 280 its_encode_cmd(cmd, GITS_CMD_MAPVI);
264 its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); 281 its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
265 its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); 282 its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
266 its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); 283 its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
267 its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id); 284 its_encode_collection(cmd, col->col_id);
268 285
269 its_fixup_cmd(cmd); 286 its_fixup_cmd(cmd);
270 287
271 return desc->its_mapvi_cmd.dev->collection; 288 return col;
272} 289}
273 290
274static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, 291static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
275 struct its_cmd_desc *desc) 292 struct its_cmd_desc *desc)
276{ 293{
294 struct its_collection *col;
295
296 col = dev_event_to_col(desc->its_movi_cmd.dev,
297 desc->its_movi_cmd.event_id);
298
277 its_encode_cmd(cmd, GITS_CMD_MOVI); 299 its_encode_cmd(cmd, GITS_CMD_MOVI);
278 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); 300 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
279 its_encode_event_id(cmd, desc->its_movi_cmd.id); 301 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
280 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); 302 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
281 303
282 its_fixup_cmd(cmd); 304 its_fixup_cmd(cmd);
283 305
284 return desc->its_movi_cmd.dev->collection; 306 return col;
285} 307}
286 308
287static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, 309static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
288 struct its_cmd_desc *desc) 310 struct its_cmd_desc *desc)
289{ 311{
312 struct its_collection *col;
313
314 col = dev_event_to_col(desc->its_discard_cmd.dev,
315 desc->its_discard_cmd.event_id);
316
290 its_encode_cmd(cmd, GITS_CMD_DISCARD); 317 its_encode_cmd(cmd, GITS_CMD_DISCARD);
291 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); 318 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
292 its_encode_event_id(cmd, desc->its_discard_cmd.event_id); 319 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
293 320
294 its_fixup_cmd(cmd); 321 its_fixup_cmd(cmd);
295 322
296 return desc->its_discard_cmd.dev->collection; 323 return col;
297} 324}
298 325
299static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, 326static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
300 struct its_cmd_desc *desc) 327 struct its_cmd_desc *desc)
301{ 328{
329 struct its_collection *col;
330
331 col = dev_event_to_col(desc->its_inv_cmd.dev,
332 desc->its_inv_cmd.event_id);
333
302 its_encode_cmd(cmd, GITS_CMD_INV); 334 its_encode_cmd(cmd, GITS_CMD_INV);
303 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); 335 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
304 its_encode_event_id(cmd, desc->its_inv_cmd.event_id); 336 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
305 337
306 its_fixup_cmd(cmd); 338 its_fixup_cmd(cmd);
307 339
308 return desc->its_inv_cmd.dev->collection; 340 return col;
309} 341}
310 342
311static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 343static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
@@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
497 529
498 desc.its_movi_cmd.dev = dev; 530 desc.its_movi_cmd.dev = dev;
499 desc.its_movi_cmd.col = col; 531 desc.its_movi_cmd.col = col;
500 desc.its_movi_cmd.id = id; 532 desc.its_movi_cmd.event_id = id;
501 533
502 its_send_single_command(dev->its, its_build_movi_cmd, &desc); 534 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
503} 535}
@@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
528static inline u32 its_get_event_id(struct irq_data *d) 560static inline u32 its_get_event_id(struct irq_data *d)
529{ 561{
530 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 562 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
531 return d->hwirq - its_dev->lpi_base; 563 return d->hwirq - its_dev->event_map.lpi_base;
532} 564}
533 565
534static void lpi_set_config(struct irq_data *d, bool enable) 566static void lpi_set_config(struct irq_data *d, bool enable)
@@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
583 615
584 target_col = &its_dev->its->collections[cpu]; 616 target_col = &its_dev->its->collections[cpu];
585 its_send_movi(its_dev, target_col, id); 617 its_send_movi(its_dev, target_col, id);
586 its_dev->collection = target_col; 618 its_dev->event_map.col_map[id] = cpu;
587 619
588 return IRQ_SET_MASK_OK_DONE; 620 return IRQ_SET_MASK_OK_DONE;
589} 621}
@@ -713,8 +745,10 @@ out:
713 return bitmap; 745 return bitmap;
714} 746}
715 747
716static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) 748static void its_lpi_free(struct event_lpi_map *map)
717{ 749{
750 int base = map->lpi_base;
751 int nr_ids = map->nr_lpis;
718 int lpi; 752 int lpi;
719 753
720 spin_lock(&lpi_lock); 754 spin_lock(&lpi_lock);
@@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
731 765
732 spin_unlock(&lpi_lock); 766 spin_unlock(&lpi_lock);
733 767
734 kfree(bitmap); 768 kfree(map->lpi_map);
769 kfree(map->col_map);
735} 770}
736 771
737/* 772/*
@@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1099 struct its_device *dev; 1134 struct its_device *dev;
1100 unsigned long *lpi_map; 1135 unsigned long *lpi_map;
1101 unsigned long flags; 1136 unsigned long flags;
1137 u16 *col_map = NULL;
1102 void *itt; 1138 void *itt;
1103 int lpi_base; 1139 int lpi_base;
1104 int nr_lpis; 1140 int nr_lpis;
1105 int nr_ites; 1141 int nr_ites;
1106 int cpu;
1107 int sz; 1142 int sz;
1108 1143
1109 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1144 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1117 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 1152 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
1118 itt = kzalloc(sz, GFP_KERNEL); 1153 itt = kzalloc(sz, GFP_KERNEL);
1119 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 1154 lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
1155 if (lpi_map)
1156 col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
1120 1157
1121 if (!dev || !itt || !lpi_map) { 1158 if (!dev || !itt || !lpi_map || !col_map) {
1122 kfree(dev); 1159 kfree(dev);
1123 kfree(itt); 1160 kfree(itt);
1124 kfree(lpi_map); 1161 kfree(lpi_map);
1162 kfree(col_map);
1125 return NULL; 1163 return NULL;
1126 } 1164 }
1127 1165
1128 dev->its = its; 1166 dev->its = its;
1129 dev->itt = itt; 1167 dev->itt = itt;
1130 dev->nr_ites = nr_ites; 1168 dev->nr_ites = nr_ites;
1131 dev->lpi_map = lpi_map; 1169 dev->event_map.lpi_map = lpi_map;
1132 dev->lpi_base = lpi_base; 1170 dev->event_map.col_map = col_map;
1133 dev->nr_lpis = nr_lpis; 1171 dev->event_map.lpi_base = lpi_base;
1172 dev->event_map.nr_lpis = nr_lpis;
1134 dev->device_id = dev_id; 1173 dev->device_id = dev_id;
1135 INIT_LIST_HEAD(&dev->entry); 1174 INIT_LIST_HEAD(&dev->entry);
1136 1175
@@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1138 list_add(&dev->entry, &its->its_device_list); 1177 list_add(&dev->entry, &its->its_device_list);
1139 raw_spin_unlock_irqrestore(&its->lock, flags); 1178 raw_spin_unlock_irqrestore(&its->lock, flags);
1140 1179
1141 /* Bind the device to the first possible CPU */
1142 cpu = cpumask_first(cpu_online_mask);
1143 dev->collection = &its->collections[cpu];
1144
1145 /* Map device to its ITT */ 1180 /* Map device to its ITT */
1146 its_send_mapd(dev, 1); 1181 its_send_mapd(dev, 1);
1147 1182
@@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
1163{ 1198{
1164 int idx; 1199 int idx;
1165 1200
1166 idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis); 1201 idx = find_first_zero_bit(dev->event_map.lpi_map,
1167 if (idx == dev->nr_lpis) 1202 dev->event_map.nr_lpis);
1203 if (idx == dev->event_map.nr_lpis)
1168 return -ENOSPC; 1204 return -ENOSPC;
1169 1205
1170 *hwirq = dev->lpi_base + idx; 1206 *hwirq = dev->event_map.lpi_base + idx;
1171 set_bit(idx, dev->lpi_map); 1207 set_bit(idx, dev->event_map.lpi_map);
1172 1208
1173 return 0; 1209 return 0;
1174} 1210}
@@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1288 irq_domain_set_hwirq_and_chip(domain, virq + i, 1324 irq_domain_set_hwirq_and_chip(domain, virq + i,
1289 hwirq, &its_irq_chip, its_dev); 1325 hwirq, &its_irq_chip, its_dev);
1290 dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", 1326 dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
1291 (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i); 1327 (int)(hwirq - its_dev->event_map.lpi_base),
1328 (int)hwirq, virq + i);
1292 } 1329 }
1293 1330
1294 return 0; 1331 return 0;
@@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
1300 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1337 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1301 u32 event = its_get_event_id(d); 1338 u32 event = its_get_event_id(d);
1302 1339
1340 /* Bind the LPI to the first possible CPU */
1341 its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
1342
1303 /* Map the GIC IRQ and event to the device */ 1343 /* Map the GIC IRQ and event to the device */
1304 its_send_mapvi(its_dev, d->hwirq, event); 1344 its_send_mapvi(its_dev, d->hwirq, event);
1305} 1345}
@@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1327 u32 event = its_get_event_id(data); 1367 u32 event = its_get_event_id(data);
1328 1368
1329 /* Mark interrupt index as unused */ 1369 /* Mark interrupt index as unused */
1330 clear_bit(event, its_dev->lpi_map); 1370 clear_bit(event, its_dev->event_map.lpi_map);
1331 1371
1332 /* Nuke the entry in the domain */ 1372 /* Nuke the entry in the domain */
1333 irq_domain_reset_irq_data(data); 1373 irq_domain_reset_irq_data(data);
1334 } 1374 }
1335 1375
1336 /* If all interrupts have been freed, start mopping the floor */ 1376 /* If all interrupts have been freed, start mopping the floor */
1337 if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) { 1377 if (bitmap_empty(its_dev->event_map.lpi_map,
1338 its_lpi_free(its_dev->lpi_map, 1378 its_dev->event_map.nr_lpis)) {
1339 its_dev->lpi_base, 1379 its_lpi_free(&its_dev->event_map);
1340 its_dev->nr_lpis);
1341 1380
1342 /* Unmap device/itt */ 1381 /* Unmap device/itt */
1343 its_send_mapd(its_dev, 0); 1382 its_send_mapd(its_dev, 0);
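
Note: the event_map refactor above stores per-event routing in a small per-device array
instead of a single collection pointer for the whole device. Reduced to a standalone
sketch (struct and function names are illustrative; kcalloc/kfree are the real API):

#include <linux/slab.h>

struct event_map {
        u16 *col_map;           /* col_map[event] = target collection/CPU */
        int  nr_events;
};

static int event_map_init(struct event_map *m, int nr_events)
{
        m->col_map = kcalloc(nr_events, sizeof(*m->col_map), GFP_KERNEL);
        if (!m->col_map)
                return -ENOMEM;
        m->nr_events = nr_events;
        return 0;
}

/* What its_set_affinity() now updates instead of a device-wide collection. */
static void event_map_set_target(struct event_map *m, int event, u16 col)
{
        m->col_map[event] = col;
}

static void event_map_free(struct event_map *m)
{
        kfree(m->col_map);
}
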
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 8d7e1c8b6d56..4dd88264dff5 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1055,7 +1055,7 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
1055 1055
1056 processor = (struct acpi_madt_generic_interrupt *)header; 1056 processor = (struct acpi_madt_generic_interrupt *)header;
1057 1057
1058 if (BAD_MADT_ENTRY(processor, end)) 1058 if (BAD_MADT_GICC_ENTRY(processor, end))
1059 return -EINVAL; 1059 return -EINVAL;
1060 1060
1061 /* 1061 /*
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 4400edd1a6c7..ff4be0515a0d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -257,16 +257,6 @@ int gic_get_c0_fdc_int(void)
257 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; 257 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
258 } 258 }
259 259
260 /*
261 * Some cores claim the FDC is routable but it doesn't actually seem to
262 * be connected.
263 */
264 switch (current_cpu_type()) {
265 case CPU_INTERAPTIV:
266 case CPU_PROAPTIV:
267 return -1;
268 }
269
270 return irq_create_mapping(gic_irq_domain, 260 return irq_create_mapping(gic_irq_domain,
271 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); 261 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
272} 262}
@@ -548,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
548 538
549static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 539static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
550{ 540{
551 smp_call_function_interrupt(); 541 generic_smp_call_function_interrupt();
552 542
553 return IRQ_HANDLED; 543 return IRQ_HANDLED;
554} 544}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index a45121546caf..acb721b31bcf 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -2,7 +2,7 @@
2 * SPEAr platform shared irq layer source file 2 * SPEAr platform shared irq layer source file
3 * 3 *
4 * Copyright (C) 2009-2012 ST Microelectronics 4 * Copyright (C) 2009-2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * Copyright (C) 2012 ST Microelectronics 7 * Copyright (C) 2012 ST Microelectronics
8 * Shiraz Hashim <shiraz.linux.kernel@gmail.com> 8 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
524 cs->hw.ser->tty = tty; 524 cs->hw.ser->tty = tty;
525 atomic_set(&cs->hw.ser->refcnt, 1); 525 atomic_set(&cs->hw.ser->refcnt, 1);
526 init_completion(&cs->hw.ser->dead_cmp); 526 init_completion(&cs->hw.ser->dead_cmp);
527
528 tty->disc_data = cs; 527 tty->disc_data = cs;
529 528
529 /* Set the amount of data we're willing to receive per call
530 * from the hardware driver to half of the input buffer size
531 * to leave some reserve.
532 * Note: We don't do flow control towards the hardware driver.
533 * If more data is received than will fit into the input buffer,
534 * it will be dropped and an error will be logged. This should
535 * never happen as the device is slow and the buffer size ample.
536 */
537 tty->receive_room = RBUFSIZE/2;
538
530 /* OK.. Initialization of the datastructures and the HW is done.. Now 539 /* OK.. Initialization of the datastructures and the HW is done.. Now
531 * startup system and notify the LL that we are ready to run 540 * startup system and notify the LL that we are ready to run
532 */ 541 */
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
598} 607}
599 608
600/* 609/*
601 * Read on the tty.
602 * Unused, received data goes only to the Gigaset driver.
603 */
604static ssize_t
605gigaset_tty_read(struct tty_struct *tty, struct file *file,
606 unsigned char __user *buf, size_t count)
607{
608 return -EAGAIN;
609}
610
611/*
612 * Write on the tty.
613 * Unused, transmit data comes only from the Gigaset driver.
614 */
615static ssize_t
616gigaset_tty_write(struct tty_struct *tty, struct file *file,
617 const unsigned char *buf, size_t count)
618{
619 return -EAGAIN;
620}
621
622/*
623 * Ioctl on the tty. 610 * Ioctl on the tty.
624 * Called in process context only. 611 * Called in process context only.
625 * May be re-entered by multiple ioctl calling threads. 612 * May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
752 .open = gigaset_tty_open, 739 .open = gigaset_tty_open,
753 .close = gigaset_tty_close, 740 .close = gigaset_tty_close,
754 .hangup = gigaset_tty_hangup, 741 .hangup = gigaset_tty_hangup,
755 .read = gigaset_tty_read,
756 .write = gigaset_tty_write,
757 .ioctl = gigaset_tty_ioctl, 742 .ioctl = gigaset_tty_ioctl,
758 .receive_buf = gigaset_tty_receive, 743 .receive_buf = gigaset_tty_receive,
759 .write_wakeup = gigaset_tty_wakeup, 744 .write_wakeup = gigaset_tty_wakeup,
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
9#include <linux/fcntl.h> 9#include <linux/fcntl.h>
10#include <linux/init.h> 10#include <linux/module.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13 13
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..bfec3bdfe598 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
259 the ciphers you're going to use in the cryptoapi configuration. 259 the ciphers you're going to use in the cryptoapi configuration.
260 260
261 For further information on dm-crypt and userspace tools see: 261 For further information on dm-crypt and userspace tools see:
262 <http://code.google.com/p/cryptsetup/wiki/DMCrypt> 262 <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
263 263
264 To compile this code as a module, choose M here: the module will 264 To compile this code as a module, choose M here: the module will
265 be called dm-crypt. 265 be called dm-crypt.
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index a08e3eeac3c5..79a6d63e8ed3 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
320do { \ 320do { \
321 set_closure_fn(_cl, _fn, _wq); \ 321 set_closure_fn(_cl, _fn, _wq); \
322 closure_sub(_cl, CLOSURE_RUNNING + 1); \ 322 closure_sub(_cl, CLOSURE_RUNNING + 1); \
323 return; \
324} while (0) 323} while (0)
325 324
326/** 325/**
@@ -349,7 +348,6 @@ do { \
349do { \ 348do { \
350 set_closure_fn(_cl, _fn, _wq); \ 349 set_closure_fn(_cl, _fn, _wq); \
351 closure_queue(_cl); \ 350 closure_queue(_cl); \
352 return; \
353} while (0) 351} while (0)
354 352
355/** 353/**
@@ -365,7 +363,6 @@ do { \
365do { \ 363do { \
366 set_closure_fn(_cl, _destructor, NULL); \ 364 set_closure_fn(_cl, _destructor, NULL); \
367 closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \ 365 closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
368 return; \
369} while (0) 366} while (0)
370 367
371/** 368/**
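
Note: the three closure.h hunks above drop the hidden "return;" from these statement
macros, so every caller in the bcache hunks that follow adds an explicit return of its
own. A minimal userspace illustration of that before/after (run_later() and the other
names are stand-ins, not bcache code):

#include <stdio.h>

static void run_later(void (*fn)(void))
{
        fn();   /* stand-in for queueing the function on a workqueue */
}

/* old form (as removed above): the macro hid a return, so code after it never ran */
#define continue_at_old(fn)     do { run_later(fn); return; } while (0)

/* new form: the macro only schedules; the caller decides whether to return */
#define continue_at(fn)         do { run_later(fn); } while (0)

static void next_step(void)
{
        puts("continued in next_step");
}

static void demo(int more_work)
{
        if (more_work) {
                continue_at(next_step);
                return;         /* explicit, as in the bcache call sites that follow */
        }
        puts("finished inline");
}

int main(void)
{
        demo(1);
        demo(0);
        return 0;
}
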
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index cb64e64a4789..bf6a9ca18403 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
105 } while (n != bio); 105 } while (n != bio);
106 106
107 continue_at(&s->cl, bch_bio_submit_split_done, NULL); 107 continue_at(&s->cl, bch_bio_submit_split_done, NULL);
108 return;
108submit: 109submit:
109 generic_make_request(bio); 110 generic_make_request(bio);
110} 111}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ce64fc851251..418607a6ba33 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
592 592
593 if (!w->need_write) { 593 if (!w->need_write) {
594 closure_return_with_destructor(cl, journal_write_unlock); 594 closure_return_with_destructor(cl, journal_write_unlock);
595 return;
595 } else if (journal_full(&c->journal)) { 596 } else if (journal_full(&c->journal)) {
596 journal_reclaim(c); 597 journal_reclaim(c);
597 spin_unlock(&c->journal.lock); 598 spin_unlock(&c->journal.lock);
598 599
599 btree_flush_write(c); 600 btree_flush_write(c);
600 continue_at(cl, journal_write, system_wq); 601 continue_at(cl, journal_write, system_wq);
602 return;
601 } 603 }
602 604
603 c->journal.blocks_free -= set_blocks(w->data, block_bytes(c)); 605 c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 4afb2d26b148..f292790997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
88 if (journal_ref) 88 if (journal_ref)
89 atomic_dec_bug(journal_ref); 89 atomic_dec_bug(journal_ref);
90 90
91 if (!op->insert_data_done) 91 if (!op->insert_data_done) {
92 continue_at(cl, bch_data_insert_start, op->wq); 92 continue_at(cl, bch_data_insert_start, op->wq);
93 return;
94 }
93 95
94 bch_keylist_free(&op->insert_keys); 96 bch_keylist_free(&op->insert_keys);
95 closure_return(cl); 97 closure_return(cl);
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
216 /* 1 for the device pointer and 1 for the chksum */ 218 /* 1 for the device pointer and 1 for the chksum */
217 if (bch_keylist_realloc(&op->insert_keys, 219 if (bch_keylist_realloc(&op->insert_keys,
218 3 + (op->csum ? 1 : 0), 220 3 + (op->csum ? 1 : 0),
219 op->c)) 221 op->c)) {
220 continue_at(cl, bch_data_insert_keys, op->wq); 222 continue_at(cl, bch_data_insert_keys, op->wq);
223 return;
224 }
221 225
222 k = op->insert_keys.top; 226 k = op->insert_keys.top;
223 bkey_init(k); 227 bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
255 259
256 op->insert_data_done = true; 260 op->insert_data_done = true;
257 continue_at(cl, bch_data_insert_keys, op->wq); 261 continue_at(cl, bch_data_insert_keys, op->wq);
262 return;
258err: 263err:
259 /* bch_alloc_sectors() blocks if s->writeback = true */ 264 /* bch_alloc_sectors() blocks if s->writeback = true */
260 BUG_ON(op->writeback); 265 BUG_ON(op->writeback);
@@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl)
576 ret = bch_btree_map_keys(&s->op, s->iop.c, 581 ret = bch_btree_map_keys(&s->op, s->iop.c,
577 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), 582 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
578 cache_lookup_fn, MAP_END_KEY); 583 cache_lookup_fn, MAP_END_KEY);
579 if (ret == -EAGAIN) 584 if (ret == -EAGAIN) {
580 continue_at(cl, cache_lookup, bcache_wq); 585 continue_at(cl, cache_lookup, bcache_wq);
586 return;
587 }
581 588
582 closure_return(cl); 589 closure_return(cl);
583} 590}
@@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1085 continue_at_nobarrier(&s->cl, 1092 continue_at_nobarrier(&s->cl,
1086 flash_dev_nodata, 1093 flash_dev_nodata,
1087 bcache_wq); 1094 bcache_wq);
1095 return;
1088 } else if (rw) { 1096 } else if (rw) {
1089 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1097 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1090 &KEY(d->id, bio->bi_iter.bi_sector, 0), 1098 &KEY(d->id, bio->bi_iter.bi_sector, 0),
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
494 bitmap_super_t *sb; 494 bitmap_super_t *sb;
495 unsigned long chunksize, daemon_sleep, write_behind; 495 unsigned long chunksize, daemon_sleep, write_behind;
496 496
497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL); 497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
498 if (bitmap->storage.sb_page == NULL) 498 if (bitmap->storage.sb_page == NULL)
499 return -ENOMEM; 499 return -ENOMEM;
500 bitmap->storage.sb_page->index = 0; 500 bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
541 sb->state = cpu_to_le32(bitmap->flags); 541 sb->state = cpu_to_le32(bitmap->flags);
542 bitmap->events_cleared = bitmap->mddev->events; 542 bitmap->events_cleared = bitmap->mddev->events;
543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
544 bitmap->mddev->bitmap_info.nodes = 0;
544 545
545 kunmap_atomic(sb); 546 kunmap_atomic(sb);
546 547
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
558 unsigned long sectors_reserved = 0; 559 unsigned long sectors_reserved = 0;
559 int err = -EINVAL; 560 int err = -EINVAL;
560 struct page *sb_page; 561 struct page *sb_page;
562 loff_t offset = bitmap->mddev->bitmap_info.offset;
561 563
562 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 564 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
563 chunksize = 128 * 1024 * 1024; 565 chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
584 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); 586 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
585 /* to 4k blocks */ 587 /* to 4k blocks */
586 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); 588 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
587 bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); 589 offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
588 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, 590 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
589 bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); 591 bitmap->cluster_slot, offset);
590 } 592 }
591 593
592 if (bitmap->storage.file) { 594 if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
597 bitmap, bytes, sb_page); 599 bitmap, bytes, sb_page);
598 } else { 600 } else {
599 err = read_sb_page(bitmap->mddev, 601 err = read_sb_page(bitmap->mddev,
600 bitmap->mddev->bitmap_info.offset, 602 offset,
601 sb_page, 603 sb_page,
602 0, sizeof(bitmap_super_t)); 604 0, sizeof(bitmap_super_t));
603 } 605 }
@@ -611,8 +613,16 @@ re_read:
611 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; 613 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
612 write_behind = le32_to_cpu(sb->write_behind); 614 write_behind = le32_to_cpu(sb->write_behind);
613 sectors_reserved = le32_to_cpu(sb->sectors_reserved); 615 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
614 nodes = le32_to_cpu(sb->nodes); 616 /* XXX: This is a hack to ensure that we don't use clustering
615 strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); 617 * in case:
618 * - dm-raid is in use and
619 * - the nodes written in bitmap_sb is erroneous.
620 */
621 if (!bitmap->mddev->sync_super) {
622 nodes = le32_to_cpu(sb->nodes);
623 strlcpy(bitmap->mddev->bitmap_info.cluster_name,
624 sb->cluster_name, 64);
625 }
616 626
617 /* verify that the bitmap-specific fields are valid */ 627 /* verify that the bitmap-specific fields are valid */
618 if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) 628 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
671 kunmap_atomic(sb); 681 kunmap_atomic(sb);
672 /* Assiging chunksize is required for "re_read" */ 682 /* Assiging chunksize is required for "re_read" */
673 bitmap->mddev->bitmap_info.chunksize = chunksize; 683 bitmap->mddev->bitmap_info.chunksize = chunksize;
674 if (nodes && (bitmap->cluster_slot < 0)) { 684 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
675 err = md_setup_cluster(bitmap->mddev, nodes); 685 err = md_setup_cluster(bitmap->mddev, nodes);
676 if (err) { 686 if (err) {
677 pr_err("%s: Could not setup cluster service (%d)\n", 687 pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1866 if (IS_ERR(bitmap)) 1876 if (IS_ERR(bitmap))
1867 return PTR_ERR(bitmap); 1877 return PTR_ERR(bitmap);
1868 1878
1869 rv = bitmap_read_sb(bitmap);
1870 if (rv)
1871 goto err;
1872
1873 rv = bitmap_init_from_disk(bitmap, 0); 1879 rv = bitmap_init_from_disk(bitmap, 0);
1874 if (rv) 1880 if (rv)
1875 goto err; 1881 goto err;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1472MODULE_LICENSE("GPL"); 1472MODULE_LICENSE("GPL");
1473MODULE_DESCRIPTION("mq cache policy"); 1473MODULE_DESCRIPTION("mq cache policy");
1474
1475MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..200366c62231 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1686 1686
1687 if (from_cblock(cache_size)) { 1687 if (from_cblock(cache_size)) {
1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); 1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1689 if (!mq->cache_hit_bits && mq->cache_hit_bits) { 1689 if (!mq->cache_hit_bits) {
1690 DMERR("couldn't allocate cache hit bitset"); 1690 DMERR("couldn't allocate cache hit bitset");
1691 goto bad_cache_hit_bits; 1691 goto bad_cache_hit_bits;
1692 } 1692 }
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1790MODULE_LICENSE("GPL"); 1790MODULE_LICENSE("GPL");
1791MODULE_DESCRIPTION("smq cache policy"); 1791MODULE_DESCRIPTION("smq cache policy");
1792
1793MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b4e1756b169..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1947,6 +1947,7 @@ static int commit_if_needed(struct cache *cache)
1947 1947
1948static void process_deferred_bios(struct cache *cache) 1948static void process_deferred_bios(struct cache *cache)
1949{ 1949{
1950 bool prealloc_used = false;
1950 unsigned long flags; 1951 unsigned long flags;
1951 struct bio_list bios; 1952 struct bio_list bios;
1952 struct bio *bio; 1953 struct bio *bio;
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
1966 * this bio might require one, we pause until there are some 1967 * this bio might require one, we pause until there are some
1967 * prepared mappings to process. 1968 * prepared mappings to process.
1968 */ 1969 */
1970 prealloc_used = true;
1969 if (prealloc_data_structs(cache, &structs)) { 1971 if (prealloc_data_structs(cache, &structs)) {
1970 spin_lock_irqsave(&cache->lock, flags); 1972 spin_lock_irqsave(&cache->lock, flags);
1971 bio_list_merge(&cache->deferred_bios, &bios); 1973 bio_list_merge(&cache->deferred_bios, &bios);
@@ -1983,11 +1985,13 @@ static void process_deferred_bios(struct cache *cache)
1983 process_bio(cache, &structs, bio); 1985 process_bio(cache, &structs, bio);
1984 } 1986 }
1985 1987
1986 prealloc_free_structs(cache, &structs); 1988 if (prealloc_used)
1989 prealloc_free_structs(cache, &structs);
1987} 1990}
1988 1991
1989static void process_deferred_cells(struct cache *cache) 1992static void process_deferred_cells(struct cache *cache)
1990{ 1993{
1994 bool prealloc_used = false;
1991 unsigned long flags; 1995 unsigned long flags;
1992 struct dm_bio_prison_cell *cell, *tmp; 1996 struct dm_bio_prison_cell *cell, *tmp;
1993 struct list_head cells; 1997 struct list_head cells;
@@ -2007,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
2007 * this bio might require one, we pause until there are some 2011 * this bio might require one, we pause until there are some
2008 * prepared mappings to process. 2012 * prepared mappings to process.
2009 */ 2013 */
2014 prealloc_used = true;
2010 if (prealloc_data_structs(cache, &structs)) { 2015 if (prealloc_data_structs(cache, &structs)) {
2011 spin_lock_irqsave(&cache->lock, flags); 2016 spin_lock_irqsave(&cache->lock, flags);
2012 list_splice(&cells, &cache->deferred_cells); 2017 list_splice(&cells, &cache->deferred_cells);
@@ -2017,7 +2022,8 @@ static void process_deferred_cells(struct cache *cache)
2017 process_cell(cache, &structs, cell); 2022 process_cell(cache, &structs, cell);
2018 } 2023 }
2019 2024
2020 prealloc_free_structs(cache, &structs); 2025 if (prealloc_used)
2026 prealloc_free_structs(cache, &structs);
2021} 2027}
2022 2028
2023static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) 2029static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -2062,7 +2068,7 @@ static void process_deferred_writethrough_bios(struct cache *cache)
2062 2068
2063static void writeback_some_dirty_blocks(struct cache *cache) 2069static void writeback_some_dirty_blocks(struct cache *cache)
2064{ 2070{
2065 int r = 0; 2071 bool prealloc_used = false;
2066 dm_oblock_t oblock; 2072 dm_oblock_t oblock;
2067 dm_cblock_t cblock; 2073 dm_cblock_t cblock;
2068 struct prealloc structs; 2074 struct prealloc structs;
@@ -2072,15 +2078,12 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2072 memset(&structs, 0, sizeof(structs)); 2078 memset(&structs, 0, sizeof(structs));
2073 2079
2074 while (spare_migration_bandwidth(cache)) { 2080 while (spare_migration_bandwidth(cache)) {
2075 if (prealloc_data_structs(cache, &structs)) 2081 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
2076 break; 2082 break; /* no work to do */
2077
2078 r = policy_writeback_work(cache->policy, &oblock, &cblock, busy);
2079 if (r)
2080 break;
2081 2083
2082 r = get_cell(cache, oblock, &structs, &old_ocell); 2084 prealloc_used = true;
2083 if (r) { 2085 if (prealloc_data_structs(cache, &structs) ||
2086 get_cell(cache, oblock, &structs, &old_ocell)) {
2084 policy_set_dirty(cache->policy, oblock); 2087 policy_set_dirty(cache->policy, oblock);
2085 break; 2088 break;
2086 } 2089 }
@@ -2088,7 +2091,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2088 writeback(cache, &structs, oblock, cblock, old_ocell); 2091 writeback(cache, &structs, oblock, cblock, old_ocell);
2089 } 2092 }
2090 2093
2091 prealloc_free_structs(cache, &structs); 2094 if (prealloc_used)
2095 prealloc_free_structs(cache, &structs);
2092} 2096}
2093 2097
2094/*---------------------------------------------------------------- 2098/*----------------------------------------------------------------
@@ -3496,7 +3500,7 @@ static void cache_resume(struct dm_target *ti)
3496 * <#demotions> <#promotions> <#dirty> 3500 * <#demotions> <#promotions> <#dirty>
3497 * <#features> <features>* 3501 * <#features> <features>*
3498 * <#core args> <core args> 3502 * <#core args> <core args>
3499 * <policy name> <#policy args> <policy args>* <cache metadata mode> 3503 * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
3500 */ 3504 */
3501static void cache_status(struct dm_target *ti, status_type_t type, 3505static void cache_status(struct dm_target *ti, status_type_t type,
3502 unsigned status_flags, char *result, unsigned maxlen) 3506 unsigned status_flags, char *result, unsigned maxlen)
@@ -3582,6 +3586,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
3582 else 3586 else
3583 DMEMIT("rw "); 3587 DMEMIT("rw ");
3584 3588
3589 if (dm_cache_metadata_needs_check(cache->cmd))
3590 DMEMIT("needs_check ");
3591 else
3592 DMEMIT("- ");
3593
3585 break; 3594 break;
3586 3595
3587 case STATUSTYPE_TABLE: 3596 case STATUSTYPE_TABLE:
@@ -3820,7 +3829,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3820 3829
3821static struct target_type cache_target = { 3830static struct target_type cache_target = {
3822 .name = "cache", 3831 .name = "cache",
3823 .version = {1, 7, 0}, 3832 .version = {1, 8, 0},
3824 .module = THIS_MODULE, 3833 .module = THIS_MODULE,
3825 .ctr = cache_ctr, 3834 .ctr = cache_ctr,
3826 .dtr = cache_dtr, 3835 .dtr = cache_dtr,
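
Note: the prealloc_used changes above follow one pattern: record whether the
preallocation path ever ran, and skip the free helper on the fast path where no work
was found. A rough self-contained sketch (prealloc_fill/prealloc_release and the buffer
size are illustrative, not dm-cache code):

#include <linux/slab.h>
#include <linux/types.h>

struct prealloc {
        void *buf;
};

static int prealloc_fill(struct prealloc *p)
{
        if (!p->buf)
                p->buf = kmalloc(256, GFP_KERNEL);
        return p->buf ? 0 : -ENOMEM;
}

static void prealloc_release(struct prealloc *p)
{
        kfree(p->buf);
        p->buf = NULL;
}

static void process_items(int nr_items)
{
        struct prealloc p = { NULL };
        bool prealloc_used = false;
        int i;

        for (i = 0; i < nr_items; i++) {
                /* only from here on can anything have been allocated */
                prealloc_used = true;
                if (prealloc_fill(&p))
                        break;
                /* ... consume p.buf for item i ... */
        }

        /* skip the free helper entirely on the "no work to do" path */
        if (prealloc_used)
                prealloc_release(&p);
}
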
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1293 return r; 1293 return r;
1294 1294
1295 disk_super = dm_block_data(copy); 1295 disk_super = dm_block_data(copy);
1296 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); 1296 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1297 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); 1297 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1298 dm_sm_dec_block(pmd->metadata_sm, held_root); 1298 dm_sm_dec_block(pmd->metadata_sm, held_root);
1299 1299
1300 return dm_tm_unlock(pmd->tm, copy); 1300 return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c33f61a4cc28..d2bbe8cc1e97 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -18,6 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/vmalloc.h>
21#include <linux/sort.h> 22#include <linux/sort.h>
22#include <linux/rbtree.h> 23#include <linux/rbtree.h>
23 24
@@ -268,7 +269,7 @@ struct pool {
268 process_mapping_fn process_prepared_mapping; 269 process_mapping_fn process_prepared_mapping;
269 process_mapping_fn process_prepared_discard; 270 process_mapping_fn process_prepared_discard;
270 271
271 struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE]; 272 struct dm_bio_prison_cell **cell_sort_array;
272}; 273};
273 274
274static enum pool_mode get_pool_mode(struct pool *pool); 275static enum pool_mode get_pool_mode(struct pool *pool);
@@ -665,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
665 requeue_deferred_cells(tc); 666 requeue_deferred_cells(tc);
666} 667}
667 668
668static void error_retry_list(struct pool *pool) 669static void error_retry_list_with_code(struct pool *pool, int error)
669{ 670{
670 struct thin_c *tc; 671 struct thin_c *tc;
671 672
672 rcu_read_lock(); 673 rcu_read_lock();
673 list_for_each_entry_rcu(tc, &pool->active_thins, list) 674 list_for_each_entry_rcu(tc, &pool->active_thins, list)
674 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); 675 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
675 rcu_read_unlock(); 676 rcu_read_unlock();
676} 677}
677 678
679static void error_retry_list(struct pool *pool)
680{
681 return error_retry_list_with_code(pool, -EIO);
682}
683
678/* 684/*
679 * This section of code contains the logic for processing a thin device's IO. 685 * This section of code contains the logic for processing a thin device's IO.
680 * Much of the code depends on pool object resources (lists, workqueues, etc) 686 * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2281,18 +2287,23 @@ static void do_waker(struct work_struct *ws)
2281 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 2287 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
2282} 2288}
2283 2289
2290static void notify_of_pool_mode_change_to_oods(struct pool *pool);
2291
2284/* 2292/*
2285 * We're holding onto IO to allow userland time to react. After the 2293 * We're holding onto IO to allow userland time to react. After the
2286 * timeout either the pool will have been resized (and thus back in 2294 * timeout either the pool will have been resized (and thus back in
2287 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. 2295 * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
2288 */ 2296 */
2289static void do_no_space_timeout(struct work_struct *ws) 2297static void do_no_space_timeout(struct work_struct *ws)
2290{ 2298{
2291 struct pool *pool = container_of(to_delayed_work(ws), struct pool, 2299 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2292 no_space_timeout); 2300 no_space_timeout);
2293 2301
2294 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) 2302 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2295 set_pool_mode(pool, PM_READ_ONLY); 2303 pool->pf.error_if_no_space = true;
2304 notify_of_pool_mode_change_to_oods(pool);
2305 error_retry_list_with_code(pool, -ENOSPC);
2306 }
2296} 2307}
2297 2308
2298/*----------------------------------------------------------------*/ 2309/*----------------------------------------------------------------*/
@@ -2370,6 +2381,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2370 dm_device_name(pool->pool_md), new_mode); 2381 dm_device_name(pool->pool_md), new_mode);
2371} 2382}
2372 2383
2384static void notify_of_pool_mode_change_to_oods(struct pool *pool)
2385{
2386 if (!pool->pf.error_if_no_space)
2387 notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
2388 else
2389 notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
2390}
2391
2373static bool passdown_enabled(struct pool_c *pt) 2392static bool passdown_enabled(struct pool_c *pt)
2374{ 2393{
2375 return pt->adjusted_pf.discard_passdown; 2394 return pt->adjusted_pf.discard_passdown;
@@ -2454,7 +2473,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
2454 * frequently seeing this mode. 2473 * frequently seeing this mode.
2455 */ 2474 */
2456 if (old_mode != new_mode) 2475 if (old_mode != new_mode)
2457 notify_of_pool_mode_change(pool, "out-of-data-space"); 2476 notify_of_pool_mode_change_to_oods(pool);
2458 pool->process_bio = process_bio_read_only; 2477 pool->process_bio = process_bio_read_only;
2459 pool->process_discard = process_discard_bio; 2478 pool->process_discard = process_discard_bio;
2460 pool->process_cell = process_cell_read_only; 2479 pool->process_cell = process_cell_read_only;
@@ -2777,6 +2796,7 @@ static void __pool_destroy(struct pool *pool)
2777{ 2796{
2778 __pool_table_remove(pool); 2797 __pool_table_remove(pool);
2779 2798
2799 vfree(pool->cell_sort_array);
2780 if (dm_pool_metadata_close(pool->pmd) < 0) 2800 if (dm_pool_metadata_close(pool->pmd) < 0)
2781 DMWARN("%s: dm_pool_metadata_close() failed.", __func__); 2801 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2782 2802
@@ -2889,6 +2909,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2889 goto bad_mapping_pool; 2909 goto bad_mapping_pool;
2890 } 2910 }
2891 2911
2912 pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
2913 if (!pool->cell_sort_array) {
2914 *error = "Error allocating cell sort array";
2915 err_p = ERR_PTR(-ENOMEM);
2916 goto bad_sort_array;
2917 }
2918
2892 pool->ref_count = 1; 2919 pool->ref_count = 1;
2893 pool->last_commit_jiffies = jiffies; 2920 pool->last_commit_jiffies = jiffies;
2894 pool->pool_md = pool_md; 2921 pool->pool_md = pool_md;
@@ -2897,6 +2924,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2897 2924
2898 return pool; 2925 return pool;
2899 2926
2927bad_sort_array:
2928 mempool_destroy(pool->mapping_pool);
2900bad_mapping_pool: 2929bad_mapping_pool:
2901 dm_deferred_set_destroy(pool->all_io_ds); 2930 dm_deferred_set_destroy(pool->all_io_ds);
2902bad_all_io_ds: 2931bad_all_io_ds:
@@ -3714,6 +3743,7 @@ static void emit_flags(struct pool_features *pf, char *result,
3714 * Status line is: 3743 * Status line is:
3715 * <transaction id> <used metadata sectors>/<total metadata sectors> 3744 * <transaction id> <used metadata sectors>/<total metadata sectors>
3716 * <used data sectors>/<total data sectors> <held metadata root> 3745 * <used data sectors>/<total data sectors> <held metadata root>
3746 * <pool mode> <discard config> <no space config> <needs_check>
3717 */ 3747 */
3718static void pool_status(struct dm_target *ti, status_type_t type, 3748static void pool_status(struct dm_target *ti, status_type_t type,
3719 unsigned status_flags, char *result, unsigned maxlen) 3749 unsigned status_flags, char *result, unsigned maxlen)
@@ -3815,6 +3845,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
3815 else 3845 else
3816 DMEMIT("queue_if_no_space "); 3846 DMEMIT("queue_if_no_space ");
3817 3847
3848 if (dm_pool_metadata_needs_check(pool->pmd))
3849 DMEMIT("needs_check ");
3850 else
3851 DMEMIT("- ");
3852
3818 break; 3853 break;
3819 3854
3820 case STATUSTYPE_TABLE: 3855 case STATUSTYPE_TABLE:
@@ -3918,7 +3953,7 @@ static struct target_type pool_target = {
3918 .name = "thin-pool", 3953 .name = "thin-pool",
3919 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 3954 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3920 DM_TARGET_IMMUTABLE, 3955 DM_TARGET_IMMUTABLE,
3921 .version = {1, 15, 0}, 3956 .version = {1, 16, 0},
3922 .module = THIS_MODULE, 3957 .module = THIS_MODULE,
3923 .ctr = pool_ctr, 3958 .ctr = pool_ctr,
3924 .dtr = pool_dtr, 3959 .dtr = pool_dtr,
@@ -4305,7 +4340,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
4305 4340
4306static struct target_type thin_target = { 4341static struct target_type thin_target = {
4307 .name = "thin", 4342 .name = "thin",
4308 .version = {1, 15, 0}, 4343 .version = {1, 16, 0},
4309 .module = THIS_MODULE, 4344 .module = THIS_MODULE,
4310 .ctr = thin_ctr, 4345 .ctr = thin_ctr,
4311 .dtr = thin_dtr, 4346 .dtr = thin_dtr,
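The thin-pool changes above follow the driver's usual constructor discipline: pool_create() gains one more staged allocation (cell_sort_array, sized by CELL_SORT_ARRAY_SIZE), a matching bad_sort_array label that unwinds only what was acquired before the failure, and a vfree() in __pool_destroy() placed before the metadata is closed. A minimal userspace sketch of that goto-unwind pattern follows; widget_create() and its fields are illustrative names, not taken from the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget {
	char *name;
	int *table;
	double *sort_array;
};

/* Illustrative stand-in for pool_create()'s staged allocations: each
 * resource gets its own cleanup label, and a failure jumps to the label
 * that frees everything acquired so far, in reverse order. */
static struct widget *widget_create(const char *name, size_t entries)
{
	struct widget *w = malloc(sizeof(*w));
	if (!w)
		goto bad_widget;

	w->name = strdup(name);
	if (!w->name)
		goto bad_name;

	w->table = calloc(entries, sizeof(*w->table));
	if (!w->table)
		goto bad_table;

	w->sort_array = malloc(entries * sizeof(*w->sort_array));
	if (!w->sort_array)
		goto bad_sort_array;

	return w;

bad_sort_array:
	free(w->table);
bad_table:
	free(w->name);
bad_name:
	free(w);
bad_widget:
	return NULL;
}

int main(void)
{
	struct widget *w = widget_create("pool", 8192);
	if (!w)
		return 1;
	printf("constructed with %zu entries\n", (size_t)8192);
	free(w->sort_array);
	free(w->table);
	free(w->name);
	free(w);
	return 0;
}

The labels read in reverse allocation order, so adding a new resource to the constructor only needs one new label and one new goto, which is exactly what the bad_sort_array hunk does.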
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f331d888e7f5..0d7ab20c58df 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
1067 */ 1067 */
1068static void rq_completed(struct mapped_device *md, int rw, bool run_queue) 1068static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1069{ 1069{
1070 int nr_requests_pending;
1071
1072 atomic_dec(&md->pending[rw]); 1070 atomic_dec(&md->pending[rw]);
1073 1071
1074 /* nudge anyone waiting on suspend queue */ 1072 /* nudge anyone waiting on suspend queue */
1075 nr_requests_pending = md_in_flight(md); 1073 if (!md_in_flight(md))
1076 if (!nr_requests_pending)
1077 wake_up(&md->wait); 1074 wake_up(&md->wait);
1078 1075
1079 /* 1076 /*
@@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
1085 if (run_queue) { 1082 if (run_queue) {
1086 if (md->queue->mq_ops) 1083 if (md->queue->mq_ops)
1087 blk_mq_run_hw_queues(md->queue, true); 1084 blk_mq_run_hw_queues(md->queue, true);
1088 else if (!nr_requests_pending || 1085 else
1089 (nr_requests_pending >= md->queue->nr_congestion_on))
1090 blk_run_queue_async(md->queue); 1086 blk_run_queue_async(md->queue);
1091 } 1087 }
1092 1088
@@ -1733,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
1733 struct mapped_device *md = q->queuedata; 1729 struct mapped_device *md = q->queuedata;
1734 struct dm_table *map = dm_get_live_table_fast(md); 1730 struct dm_table *map = dm_get_live_table_fast(md);
1735 struct dm_target *ti; 1731 struct dm_target *ti;
1736 sector_t max_sectors, max_size = 0; 1732 sector_t max_sectors;
1733 int max_size = 0;
1737 1734
1738 if (unlikely(!map)) 1735 if (unlikely(!map))
1739 goto out; 1736 goto out;
@@ -1746,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
1746 * Find maximum amount of I/O that won't need splitting 1743 * Find maximum amount of I/O that won't need splitting
1747 */ 1744 */
1748 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1745 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1749 (sector_t) queue_max_sectors(q)); 1746 (sector_t) BIO_MAX_SECTORS);
1750 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1747 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1751 1748 if (max_size < 0)
1752 /* 1749 max_size = 0;
1753 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
1754 * to the targets' merge function since it holds sectors not bytes).
1755 * Just doing this as an interim fix for stable@ because the more
1756 * comprehensive cleanup of switching to sector_t will impact every
1757 * DM target that implements a ->merge hook.
1758 */
1759 if (max_size > INT_MAX)
1760 max_size = INT_MAX;
1761 1750
1762 /* 1751 /*
1763 * merge_bvec_fn() returns number of bytes 1752 * merge_bvec_fn() returns number of bytes
@@ -1765,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
1765 * max is precomputed maximal io size 1754 * max is precomputed maximal io size
1766 */ 1755 */
1767 if (max_size && ti->type->merge) 1756 if (max_size && ti->type->merge)
1768 max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); 1757 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1769 /* 1758 /*
1770 * If the target doesn't support merge method and some of the devices 1759 * If the target doesn't support merge method and some of the devices
1771 * provided their merge_bvec method (we know this by looking for the 1760 * provided their merge_bvec method (we know this by looking at
1772 * max_hw_sectors that dm_set_device_limits may set), then we can't 1761 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1773 * allow bios with multiple vector entries. So always set max_size 1762 * entries. So always set max_size to 0, and the code below allows
1774 * to 0, and the code below allows just one page. 1763 * just one page.
1775 */ 1764 */
1776 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1765 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1777 max_size = 0; 1766 max_size = 0;
@@ -2281,8 +2270,6 @@ static void dm_init_old_md_queue(struct mapped_device *md)
2281 2270
2282static void cleanup_mapped_device(struct mapped_device *md) 2271static void cleanup_mapped_device(struct mapped_device *md)
2283{ 2272{
2284 cleanup_srcu_struct(&md->io_barrier);
2285
2286 if (md->wq) 2273 if (md->wq)
2287 destroy_workqueue(md->wq); 2274 destroy_workqueue(md->wq);
2288 if (md->kworker_task) 2275 if (md->kworker_task)
@@ -2294,6 +2281,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
2294 if (md->bs) 2281 if (md->bs)
2295 bioset_free(md->bs); 2282 bioset_free(md->bs);
2296 2283
2284 cleanup_srcu_struct(&md->io_barrier);
2285
2297 if (md->disk) { 2286 if (md->disk) {
2298 spin_lock(&_minor_lock); 2287 spin_lock(&_minor_lock);
2299 md->disk->private_data = NULL; 2288 md->disk->private_data = NULL;
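The dm_merge_bvec() rework keeps the byte budget in a plain int: max_sectors is capped at BIO_MAX_SECTORS, so max_sectors << SECTOR_SHIFT fits an int, and a result that goes negative (the bio already holds more than the limit) is clamped to 0 instead of wrapping in a sector_t and then only being capped at INT_MAX. A standalone sketch of the two behaviours; SECTOR_SHIFT and the sample sizes are the only assumptions here.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint64_t max_sectors = 8;      /* example budget: 4 KiB */
	unsigned int bi_size = 8192;   /* bio already holds 8 KiB */

	/* Unsigned arithmetic: the subtraction wraps around. */
	uint64_t wrapped = (max_sectors << SECTOR_SHIFT) - bi_size;

	/* Signed arithmetic with an explicit clamp, as in the fix. */
	int max_size = (int)(max_sectors << SECTOR_SHIFT) - (int)bi_size;
	if (max_size < 0)
		max_size = 0;

	printf("unsigned result: %llu (nonsense)\n",
	       (unsigned long long)wrapped);
	printf("clamped result : %d\n", max_size);
	return 0;
}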
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..0072190515e0 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
44 44
45/* md_cluster_info flags */ 45/* md_cluster_info flags */
46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1 46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
47#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
47 48
48 49
49struct md_cluster_info { 50struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
275 276
276static void recover_prep(void *arg) 277static void recover_prep(void *arg)
277{ 278{
279 struct mddev *mddev = arg;
280 struct md_cluster_info *cinfo = mddev->cluster_info;
281 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
278} 282}
279 283
280static void recover_slot(void *arg, struct dlm_slot *slot) 284static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
307 311
308 cinfo->slot_number = our_slot; 312 cinfo->slot_number = our_slot;
309 complete(&cinfo->completion); 313 complete(&cinfo->completion);
314 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
310} 315}
311 316
312static const struct dlm_lockspace_ops md_ls_ops = { 317static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
816 resync_send(mddev, RESYNCING, 0, 0); 821 resync_send(mddev, RESYNCING, 0, 0);
817} 822}
818 823
819static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) 824static int area_resyncing(struct mddev *mddev, int direction,
825 sector_t lo, sector_t hi)
820{ 826{
821 struct md_cluster_info *cinfo = mddev->cluster_info; 827 struct md_cluster_info *cinfo = mddev->cluster_info;
822 int ret = 0; 828 int ret = 0;
823 struct suspend_info *s; 829 struct suspend_info *s;
824 830
831 if ((direction == READ) &&
832 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
833 return 1;
834
825 spin_lock_irq(&cinfo->suspend_lock); 835 spin_lock_irq(&cinfo->suspend_lock);
826 if (list_empty(&cinfo->suspend_list)) 836 if (list_empty(&cinfo->suspend_list))
827 goto out; 837 goto out;
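The md-cluster hunks gate read balancing for the whole window between recover_prep() and recover_done() with the new MD_CLUSTER_SUSPEND_READ_BALANCING bit, and area_resyncing() now takes a direction so it can report every region as busy for READ while that bit is set. A userspace sketch of the same gating, using a C11 atomic flag in place of the kernel's set_bit()/test_bit() helpers; the function names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool suspend_read_balancing;

/* Another node has started recovering: reads must stay on the first
 * in-sync device until we know which ranges were stale. */
static void recover_prep(void)  { atomic_store(&suspend_read_balancing, true); }
static void recover_done(void)  { atomic_store(&suspend_read_balancing, false); }

static bool area_resyncing(bool is_read, long lo, long hi)
{
	if (is_read && atomic_load(&suspend_read_balancing))
		return true;
	/* ...otherwise consult the per-range suspend list as before... */
	(void)lo; (void)hi;
	return false;
}

int main(void)
{
	recover_prep();
	printf("read busy during recovery: %d\n", area_resyncing(true, 0, 128));
	recover_done();
	printf("read busy after recovery : %d\n", area_resyncing(true, 0, 128));
	return 0;
}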
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
18 int (*metadata_update_start)(struct mddev *mddev); 18 int (*metadata_update_start)(struct mddev *mddev);
19 int (*metadata_update_finish)(struct mddev *mddev); 19 int (*metadata_update_finish)(struct mddev *mddev);
20 int (*metadata_update_cancel)(struct mddev *mddev); 20 int (*metadata_update_cancel)(struct mddev *mddev);
21 int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); 21 int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); 22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
23 int (*add_new_disk_finish)(struct mddev *mddev); 23 int (*add_new_disk_finish)(struct mddev *mddev);
24 int (*new_disk_ack)(struct mddev *mddev, bool ack); 24 int (*new_disk_ack)(struct mddev *mddev, bool ack);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..e25f00f0138a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
5382{ 5382{
5383 struct md_personality *pers = mddev->pers; 5383 struct md_personality *pers = mddev->pers;
5384 mddev_detach(mddev); 5384 mddev_detach(mddev);
5385 /* Ensure ->event_work is done */
5386 flush_workqueue(md_misc_wq);
5385 spin_lock(&mddev->lock); 5387 spin_lock(&mddev->lock);
5386 mddev->ready = 0; 5388 mddev->ready = 0;
5387 mddev->pers = NULL; 5389 mddev->pers = NULL;
@@ -5757,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5757 char *ptr; 5759 char *ptr;
5758 int err; 5760 int err;
5759 5761
5760 file = kmalloc(sizeof(*file), GFP_NOIO); 5762 file = kzalloc(sizeof(*file), GFP_NOIO);
5761 if (!file) 5763 if (!file)
5762 return -ENOMEM; 5764 return -ENOMEM;
5763 5765
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
7437 err = request_module("md-cluster"); 7439 err = request_module("md-cluster");
7438 if (err) { 7440 if (err) {
7439 pr_err("md-cluster module not found.\n"); 7441 pr_err("md-cluster module not found.\n");
7440 return err; 7442 return -ENOENT;
7441 } 7443 }
7442 7444
7443 spin_lock(&pers_lock); 7445 spin_lock(&pers_lock);
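Three small md.c fixes sit in this diff: __md_stop() flushes md_misc_wq so a pending event_work cannot run after the personality is torn down, get_bitmap_file() switches to kzalloc() so bytes of the reply that are never written cannot leak stale kernel memory to userspace through copy_to_user(), and md_setup_cluster() returns a proper -ENOENT. The zero-initialisation point in a userspace sketch; the struct layout and field names are made up for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct bitmap_file_reply {
	char pathname[64];
	char reserved[192];   /* never written on the short path */
};

int main(void)
{
	/* calloc plays the role of kzalloc: every byte is defined before
	 * the buffer is handed to an untrusted reader. */
	struct bitmap_file_reply *file = calloc(1, sizeof(*file));
	if (!file)
		return 1;

	snprintf(file->pathname, sizeof(file->pathname), "%s", "/bitmaps/md0");

	/* With plain malloc, reserved[] would hold whatever the allocator
	 * left behind - exactly the leak the kzalloc change closes. */
	printf("first reserved byte: %d\n", file->reserved[0]);

	free(file);
	return 0;
}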
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
138 138
139extern struct dm_block_validator btree_node_validator; 139extern struct dm_block_validator btree_node_validator;
140 140
141/*
142 * Value type for upper levels of multi-level btrees.
143 */
144extern void init_le64_type(struct dm_transaction_manager *tm,
145 struct dm_btree_value_type *vt);
146
141#endif /* DM_BTREE_INTERNAL_H */ 147#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index e04cfd2d60ef..4222f774cf36 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
309 309
310 if (s < 0 && nr_center < -s) { 310 if (s < 0 && nr_center < -s) {
311 /* not enough in central node */ 311 /* not enough in central node */
312 shift(left, center, nr_center); 312 shift(left, center, -nr_center);
313 s = nr_center - target; 313 s += nr_center;
314 shift(left, right, s); 314 shift(left, right, s);
315 nr_right += s; 315 nr_right += s;
316 } else 316 } else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
323 if (s > 0 && nr_center < s) { 323 if (s > 0 && nr_center < s) {
324 /* not enough in central node */ 324 /* not enough in central node */
325 shift(center, right, nr_center); 325 shift(center, right, nr_center);
326 s = target - nr_center; 326 s -= nr_center;
327 shift(left, right, s); 327 shift(left, right, s);
328 nr_left -= s; 328 nr_left -= s;
329 } else 329 } else
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
544 return r; 544 return r;
545} 545}
546 546
547static struct dm_btree_value_type le64_type = {
548 .context = NULL,
549 .size = sizeof(__le64),
550 .inc = NULL,
551 .dec = NULL,
552 .equal = NULL
553};
554
555int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, 547int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
556 uint64_t *keys, dm_block_t *new_root) 548 uint64_t *keys, dm_block_t *new_root)
557{ 549{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
559 int index = 0, r = 0; 551 int index = 0, r = 0;
560 struct shadow_spine spine; 552 struct shadow_spine spine;
561 struct btree_node *n; 553 struct btree_node *n;
554 struct dm_btree_value_type le64_vt;
562 555
556 init_le64_type(info->tm, &le64_vt);
563 init_shadow_spine(&spine, info); 557 init_shadow_spine(&spine, info);
564 for (level = 0; level < info->levels; level++) { 558 for (level = 0; level < info->levels; level++) {
565 r = remove_raw(&spine, info, 559 r = remove_raw(&spine, info,
566 (level == last_level ? 560 (level == last_level ?
567 &info->value_type : &le64_type), 561 &info->value_type : &le64_vt),
568 root, keys[level], (unsigned *)&index); 562 root, keys[level], (unsigned *)&index);
569 if (r < 0) 563 if (r < 0)
570 break; 564 break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
654 int index = 0, r = 0; 648 int index = 0, r = 0;
655 struct shadow_spine spine; 649 struct shadow_spine spine;
656 struct btree_node *n; 650 struct btree_node *n;
651 struct dm_btree_value_type le64_vt;
657 uint64_t k; 652 uint64_t k;
658 653
654 init_le64_type(info->tm, &le64_vt);
659 init_shadow_spine(&spine, info); 655 init_shadow_spine(&spine, info);
660 for (level = 0; level < last_level; level++) { 656 for (level = 0; level < last_level; level++) {
661 r = remove_raw(&spine, info, &le64_type, 657 r = remove_raw(&spine, info, &le64_vt,
662 root, keys[level], (unsigned *) &index); 658 root, keys[level], (unsigned *) &index);
663 if (r < 0) 659 if (r < 0)
664 goto out; 660 goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
689 value_ptr(n, index)); 685 value_ptr(n, index));
690 686
691 delete_at(n, index); 687 delete_at(n, index);
688 keys[last_level] = k + 1ull;
692 689
693 } else 690 } else
694 r = -ENODATA; 691 r = -ENODATA;
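The redistribute3() fix corrects both the direction and the remaining count when the middle node cannot cover the left node's deficit: all of its entries are pulled in first (shift(left, center, -nr_center)), and only the remainder (s += nr_center) is then taken from the right-hand neighbour. Below is a counts-only simulation of the fixed path, under the assumption that shift(a, b, n) moves n entries from a to b when n > 0 and |n| entries from b to a when n < 0; the real helper also moves the key/value payloads and the code goes on to rebalance center and right.

#include <stdio.h>

/* n > 0 moves n entries from *a to *b; n < 0 moves |n| from *b to *a. */
static void shift(int *a, int *b, int n)
{
	*a -= n;
	*b += n;
}

int main(void)
{
	int nr_left = 2, nr_center = 1, nr_right = 12;
	int target = (nr_left + nr_center + nr_right) / 3;   /* 5 */
	int s = nr_left - target;                             /* -3: left is short */

	if (s < 0 && nr_center < -s) {
		/* not enough in the central node: drain it, then top up from right */
		int drained = nr_center;

		shift(&nr_left, &nr_center, -drained);  /* center -> left */
		s += drained;                           /* remaining deficit: -2 */
		shift(&nr_left, &nr_right, s);          /* right -> left */
	}

	printf("left=%d center=%d right=%d (target=%d)\n",
	       nr_left, nr_center, nr_right, target);
	return 0;
}

With these inputs the run lands the left node on the target (5 entries) with the centre drained and only the remainder taken from the right, which is what the corrected signs achieve.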
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
249{ 249{
250 return s->root; 250 return s->root;
251} 251}
252
253static void le64_inc(void *context, const void *value_le)
254{
255 struct dm_transaction_manager *tm = context;
256 __le64 v_le;
257
258 memcpy(&v_le, value_le, sizeof(v_le));
259 dm_tm_inc(tm, le64_to_cpu(v_le));
260}
261
262static void le64_dec(void *context, const void *value_le)
263{
264 struct dm_transaction_manager *tm = context;
265 __le64 v_le;
266
267 memcpy(&v_le, value_le, sizeof(v_le));
268 dm_tm_dec(tm, le64_to_cpu(v_le));
269}
270
271static int le64_equal(void *context, const void *value1_le, const void *value2_le)
272{
273 __le64 v1_le, v2_le;
274
275 memcpy(&v1_le, value1_le, sizeof(v1_le));
276 memcpy(&v2_le, value2_le, sizeof(v2_le));
277 return v1_le == v2_le;
278}
279
280void init_le64_type(struct dm_transaction_manager *tm,
281 struct dm_btree_value_type *vt)
282{
283 vt->context = tm;
284 vt->size = sizeof(__le64);
285 vt->inc = le64_inc;
286 vt->dec = le64_dec;
287 vt->equal = le64_equal;
288}
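init_le64_type() replaces the old all-NULL le64 value type used for the internal levels of multi-level btrees: those levels store child block numbers, so copying or deleting an internal node must adjust the reference count of every child it points at, which is what the new inc/dec callbacks do through the transaction manager, and the memcpy() into a local __le64 avoids unaligned loads from the packed node payload. A userspace sketch of the same value-type idea, with a toy refcount table standing in for dm_tm_inc()/dm_tm_dec() and the endianness conversion omitted; all names here are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct value_type {
	void *context;
	void (*inc)(void *context, const void *value);
	void (*dec)(void *context, const void *value);
};

/* Toy stand-in for the transaction manager's per-block refcounts. */
struct txn { unsigned refcount[16]; };

static void block_inc(void *context, const void *value)
{
	struct txn *tm = context;
	uint64_t block;

	memcpy(&block, value, sizeof(block));   /* value may be unaligned */
	tm->refcount[block]++;
}

static void block_dec(void *context, const void *value)
{
	struct txn *tm = context;
	uint64_t block;

	memcpy(&block, value, sizeof(block));
	tm->refcount[block]--;
}

static void init_block_type(struct txn *tm, struct value_type *vt)
{
	vt->context = tm;
	vt->inc = block_inc;
	vt->dec = block_dec;
}

int main(void)
{
	struct txn tm = { { 0 } };
	struct value_type vt;
	uint64_t child = 7;

	init_block_type(&tm, &vt);
	vt.inc(vt.context, &child);   /* internal node copied: child gains a ref */
	printf("refcount[7] = %u\n", tm.refcount[child]);
	vt.dec(vt.context, &child);   /* internal node dropped: ref released */
	printf("refcount[7] = %u\n", tm.refcount[child]);
	return 0;
}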
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 200ac12a1d40..c7726cebc495 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
255 int r; 255 int r;
256 struct del_stack *s; 256 struct del_stack *s;
257 257
258 s = kmalloc(sizeof(*s), GFP_KERNEL); 258 s = kmalloc(sizeof(*s), GFP_NOIO);
259 if (!s) 259 if (!s)
260 return -ENOMEM; 260 return -ENOMEM;
261 s->info = info; 261 s->info = info;
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
667 struct btree_node *n; 667 struct btree_node *n;
668 struct dm_btree_value_type le64_type; 668 struct dm_btree_value_type le64_type;
669 669
670 le64_type.context = NULL; 670 init_le64_type(info->tm, &le64_type);
671 le64_type.size = sizeof(__le64);
672 le64_type.inc = NULL;
673 le64_type.dec = NULL;
674 le64_type.equal = NULL;
675
676 init_shadow_spine(&spine, info); 671 init_shadow_spine(&spine, info);
677 672
678 for (level = 0; level < (info->levels - 1); level++) { 673 for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
336 spin_lock_irqsave(&conf->device_lock, flags); 336 spin_lock_irqsave(&conf->device_lock, flags);
337 if (r1_bio->mddev->degraded == conf->raid_disks || 337 if (r1_bio->mddev->degraded == conf->raid_disks ||
338 (r1_bio->mddev->degraded == conf->raid_disks-1 && 338 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
339 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 339 test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
340 uptodate = 1; 340 uptodate = 1;
341 spin_unlock_irqrestore(&conf->device_lock, flags); 341 spin_unlock_irqrestore(&conf->device_lock, flags);
342 } 342 }
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
541 541
542 if ((conf->mddev->recovery_cp < this_sector + sectors) || 542 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
543 (mddev_is_clustered(conf->mddev) && 543 (mddev_is_clustered(conf->mddev) &&
544 md_cluster_ops->area_resyncing(conf->mddev, this_sector, 544 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
545 this_sector + sectors))) 545 this_sector + sectors)))
546 choose_first = 1; 546 choose_first = 1;
547 else 547 else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1111 ((bio_end_sector(bio) > mddev->suspend_lo && 1111 ((bio_end_sector(bio) > mddev->suspend_lo &&
1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || 1112 bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1113 (mddev_is_clustered(mddev) && 1113 (mddev_is_clustered(mddev) &&
1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1114 md_cluster_ops->area_resyncing(mddev, WRITE,
1115 bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
1115 /* As the suspend_* range is controlled by 1116 /* As the suspend_* range is controlled by
1116 * userspace, we want an interruptible 1117 * userspace, we want an interruptible
1117 * wait. 1118 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1124 if (bio_end_sector(bio) <= mddev->suspend_lo || 1125 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || 1126 bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1126 (mddev_is_clustered(mddev) && 1127 (mddev_is_clustered(mddev) &&
1127 !md_cluster_ops->area_resyncing(mddev, 1128 !md_cluster_ops->area_resyncing(mddev, WRITE,
1128 bio->bi_iter.bi_sector, bio_end_sector(bio)))) 1129 bio->bi_iter.bi_sector, bio_end_sector(bio))))
1129 break; 1130 break;
1130 schedule(); 1131 schedule();
@@ -1475,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1475{ 1476{
1476 char b[BDEVNAME_SIZE]; 1477 char b[BDEVNAME_SIZE];
1477 struct r1conf *conf = mddev->private; 1478 struct r1conf *conf = mddev->private;
1479 unsigned long flags;
1478 1480
1479 /* 1481 /*
1480 * If it is not operational, then we have already marked it as dead 1482 * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1494 return; 1496 return;
1495 } 1497 }
1496 set_bit(Blocked, &rdev->flags); 1498 set_bit(Blocked, &rdev->flags);
1499 spin_lock_irqsave(&conf->device_lock, flags);
1497 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1500 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1498 unsigned long flags;
1499 spin_lock_irqsave(&conf->device_lock, flags);
1500 mddev->degraded++; 1501 mddev->degraded++;
1501 set_bit(Faulty, &rdev->flags); 1502 set_bit(Faulty, &rdev->flags);
1502 spin_unlock_irqrestore(&conf->device_lock, flags);
1503 } else 1503 } else
1504 set_bit(Faulty, &rdev->flags); 1504 set_bit(Faulty, &rdev->flags);
1505 spin_unlock_irqrestore(&conf->device_lock, flags);
1505 /* 1506 /*
1506 * if recovery is running, make sure it aborts. 1507 * if recovery is running, make sure it aborts.
1507 */ 1508 */
@@ -1567,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
1567 * Find all failed disks within the RAID1 configuration 1568 * Find all failed disks within the RAID1 configuration
1568 * and mark them readable. 1569 * and mark them readable.
1569 * Called under mddev lock, so rcu protection not needed. 1570 * Called under mddev lock, so rcu protection not needed.
1571 * device_lock used to avoid races with raid1_end_read_request
1572 * which expects 'In_sync' flags and ->degraded to be consistent.
1570 */ 1573 */
1574 spin_lock_irqsave(&conf->device_lock, flags);
1571 for (i = 0; i < conf->raid_disks; i++) { 1575 for (i = 0; i < conf->raid_disks; i++) {
1572 struct md_rdev *rdev = conf->mirrors[i].rdev; 1576 struct md_rdev *rdev = conf->mirrors[i].rdev;
1573 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 1577 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
1598 sysfs_notify_dirent_safe(rdev->sysfs_state); 1602 sysfs_notify_dirent_safe(rdev->sysfs_state);
1599 } 1603 }
1600 } 1604 }
1601 spin_lock_irqsave(&conf->device_lock, flags);
1602 mddev->degraded -= count; 1605 mddev->degraded -= count;
1603 spin_unlock_irqrestore(&conf->device_lock, flags); 1606 spin_unlock_irqrestore(&conf->device_lock, flags);
1604 1607
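The raid1 changes move the Faulty/In_sync flag updates and the degraded counter under a single device_lock critical section, in both error() and raid1_spare_active(), and raid1_end_read_request() now tests In_sync directly, so readers of the pair never see the count and the flags disagree. A pthread sketch of the invariant, with in_sync[] and degraded standing in for the rdev flags and mddev->degraded; this is a loose model, not the driver's locking.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_DISKS 4

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static bool in_sync[NR_DISKS] = { true, true, true, true };
static int degraded;

/* Mark a disk faulty: the flag and the counter change together with
 * respect to anyone else holding device_lock. */
static void mark_faulty(int disk)
{
	pthread_mutex_lock(&device_lock);
	if (in_sync[disk]) {
		in_sync[disk] = false;
		degraded++;
	}
	pthread_mutex_unlock(&device_lock);
}

/* A read-error path can now rely on the pair being consistent. */
static bool last_working_copy(int disk)
{
	bool last;

	pthread_mutex_lock(&device_lock);
	last = (degraded == NR_DISKS - 1) && in_sync[disk];
	pthread_mutex_unlock(&device_lock);
	return last;
}

int main(void)
{
	mark_faulty(1);
	mark_faulty(2);
	mark_faulty(3);
	printf("disk0 is the last working copy: %d\n", last_working_copy(0));
	return 0;
}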
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..38c58e19cfce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3556 /* far_copies must be 1 */ 3556 /* far_copies must be 1 */
3557 conf->prev.stride = conf->dev_sectors; 3557 conf->prev.stride = conf->dev_sectors;
3558 } 3558 }
3559 conf->reshape_safe = conf->reshape_progress;
3559 spin_lock_init(&conf->device_lock); 3560 spin_lock_init(&conf->device_lock);
3560 INIT_LIST_HEAD(&conf->retry_list); 3561 INIT_LIST_HEAD(&conf->retry_list);
3561 3562
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
3760 } 3761 }
3761 conf->offset_diff = min_offset_diff; 3762 conf->offset_diff = min_offset_diff;
3762 3763
3763 conf->reshape_safe = conf->reshape_progress;
3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
4103 conf->reshape_progress = size; 4103 conf->reshape_progress = size;
4104 } else 4104 } else
4105 conf->reshape_progress = 0; 4105 conf->reshape_progress = 0;
4106 conf->reshape_safe = conf->reshape_progress;
4106 spin_unlock_irq(&conf->device_lock); 4107 spin_unlock_irq(&conf->device_lock);
4107 4108
4108 if (mddev->delta_disks && mddev->bitmap) { 4109 if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
4170 rdev->new_data_offset = rdev->data_offset; 4171 rdev->new_data_offset = rdev->data_offset;
4171 smp_wmb(); 4172 smp_wmb();
4172 conf->reshape_progress = MaxSector; 4173 conf->reshape_progress = MaxSector;
4174 conf->reshape_safe = MaxSector;
4173 mddev->reshape_position = MaxSector; 4175 mddev->reshape_position = MaxSector;
4174 spin_unlock_irq(&conf->device_lock); 4176 spin_unlock_irq(&conf->device_lock);
4175 return ret; 4177 return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
4524 md_finish_reshape(conf->mddev); 4526 md_finish_reshape(conf->mddev);
4525 smp_wmb(); 4527 smp_wmb();
4526 conf->reshape_progress = MaxSector; 4528 conf->reshape_progress = MaxSector;
4529 conf->reshape_safe = MaxSector;
4527 spin_unlock_irq(&conf->device_lock); 4530 spin_unlock_irq(&conf->device_lock);
4528 4531
4529 /* read-ahead size must cover two whole stripes, which is 4532 /* read-ahead size must cover two whole stripes, which is
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..f757023fc458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2162 if (!sc) 2162 if (!sc)
2163 return -ENOMEM; 2163 return -ENOMEM;
2164 2164
2165 /* Need to ensure auto-resizing doesn't interfere */
2166 mutex_lock(&conf->cache_size_mutex);
2167
2165 for (i = conf->max_nr_stripes; i; i--) { 2168 for (i = conf->max_nr_stripes; i; i--) {
2166 nsh = alloc_stripe(sc, GFP_KERNEL); 2169 nsh = alloc_stripe(sc, GFP_KERNEL);
2167 if (!nsh) 2170 if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2178 kmem_cache_free(sc, nsh); 2181 kmem_cache_free(sc, nsh);
2179 } 2182 }
2180 kmem_cache_destroy(sc); 2183 kmem_cache_destroy(sc);
2184 mutex_unlock(&conf->cache_size_mutex);
2181 return -ENOMEM; 2185 return -ENOMEM;
2182 } 2186 }
2183 /* Step 2 - Must use GFP_NOIO now. 2187 /* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2224 } else 2228 } else
2225 err = -ENOMEM; 2229 err = -ENOMEM;
2226 2230
2231 mutex_unlock(&conf->cache_size_mutex);
2227 /* Step 4, return new stripes to service */ 2232 /* Step 4, return new stripes to service */
2228 while(!list_empty(&newstripes)) { 2233 while(!list_empty(&newstripes)) {
2229 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2234 nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2251,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2251static int drop_one_stripe(struct r5conf *conf) 2256static int drop_one_stripe(struct r5conf *conf)
2252{ 2257{
2253 struct stripe_head *sh; 2258 struct stripe_head *sh;
2254 int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 2259 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2255 2260
2256 spin_lock_irq(conf->hash_locks + hash); 2261 spin_lock_irq(conf->hash_locks + hash);
2257 sh = get_free_stripe(conf, hash); 2262 sh = get_free_stripe(conf, hash);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4061 &first_bad, &bad_sectors)) 4066 &first_bad, &bad_sectors))
4062 set_bit(R5_ReadRepl, &dev->flags); 4067 set_bit(R5_ReadRepl, &dev->flags);
4063 else { 4068 else {
4064 if (rdev) 4069 if (rdev && !test_bit(Faulty, &rdev->flags))
4065 set_bit(R5_NeedReplace, &dev->flags); 4070 set_bit(R5_NeedReplace, &dev->flags);
4071 else
4072 clear_bit(R5_NeedReplace, &dev->flags);
4066 rdev = rcu_dereference(conf->disks[i].rdev); 4073 rdev = rcu_dereference(conf->disks[i].rdev);
4067 clear_bit(R5_ReadRepl, &dev->flags); 4074 clear_bit(R5_ReadRepl, &dev->flags);
4068 } 4075 }
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
5857 pr_debug("%d stripes handled\n", handled); 5864 pr_debug("%d stripes handled\n", handled);
5858 5865
5859 spin_unlock_irq(&conf->device_lock); 5866 spin_unlock_irq(&conf->device_lock);
5860 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { 5867 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
5868 mutex_trylock(&conf->cache_size_mutex)) {
5861 grow_one_stripe(conf, __GFP_NOWARN); 5869 grow_one_stripe(conf, __GFP_NOWARN);
5862 /* Set flag even if allocation failed. This helps 5870 /* Set flag even if allocation failed. This helps
5863 * slow down allocation requests when mem is short 5871 * slow down allocation requests when mem is short
5864 */ 5872 */
5865 set_bit(R5_DID_ALLOC, &conf->cache_state); 5873 set_bit(R5_DID_ALLOC, &conf->cache_state);
5874 mutex_unlock(&conf->cache_size_mutex);
5866 } 5875 }
5867 5876
5868 async_tx_issue_pending_all(); 5877 async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
5894 return -EINVAL; 5903 return -EINVAL;
5895 5904
5896 conf->min_nr_stripes = size; 5905 conf->min_nr_stripes = size;
5906 mutex_lock(&conf->cache_size_mutex);
5897 while (size < conf->max_nr_stripes && 5907 while (size < conf->max_nr_stripes &&
5898 drop_one_stripe(conf)) 5908 drop_one_stripe(conf))
5899 ; 5909 ;
5910 mutex_unlock(&conf->cache_size_mutex);
5900 5911
5901 5912
5902 err = md_allow_write(mddev); 5913 err = md_allow_write(mddev);
5903 if (err) 5914 if (err)
5904 return err; 5915 return err;
5905 5916
5917 mutex_lock(&conf->cache_size_mutex);
5906 while (size > conf->max_nr_stripes) 5918 while (size > conf->max_nr_stripes)
5907 if (!grow_one_stripe(conf, GFP_KERNEL)) 5919 if (!grow_one_stripe(conf, GFP_KERNEL))
5908 break; 5920 break;
5921 mutex_unlock(&conf->cache_size_mutex);
5909 5922
5910 return 0; 5923 return 0;
5911} 5924}
@@ -6371,11 +6384,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
6371 struct shrink_control *sc) 6384 struct shrink_control *sc)
6372{ 6385{
6373 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6386 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6374 int ret = 0; 6387 unsigned long ret = SHRINK_STOP;
6375 while (ret < sc->nr_to_scan) { 6388
6376 if (drop_one_stripe(conf) == 0) 6389 if (mutex_trylock(&conf->cache_size_mutex)) {
6377 return SHRINK_STOP; 6390 ret= 0;
6378 ret++; 6391 while (ret < sc->nr_to_scan &&
6392 conf->max_nr_stripes > conf->min_nr_stripes) {
6393 if (drop_one_stripe(conf) == 0) {
6394 ret = SHRINK_STOP;
6395 break;
6396 }
6397 ret++;
6398 }
6399 mutex_unlock(&conf->cache_size_mutex);
6379 } 6400 }
6380 return ret; 6401 return ret;
6381} 6402}
@@ -6444,6 +6465,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
6444 goto abort; 6465 goto abort;
6445 spin_lock_init(&conf->device_lock); 6466 spin_lock_init(&conf->device_lock);
6446 seqcount_init(&conf->gen_lock); 6467 seqcount_init(&conf->gen_lock);
6468 mutex_init(&conf->cache_size_mutex);
6447 init_waitqueue_head(&conf->wait_for_quiescent); 6469 init_waitqueue_head(&conf->wait_for_quiescent);
6448 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { 6470 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
6449 init_waitqueue_head(&conf->wait_for_stripe[i]); 6471 init_waitqueue_head(&conf->wait_for_stripe[i]);
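The raid5 hunks serialise every path that grows or shrinks the stripe cache behind the new cache_size_mutex, and the memory-shrinker callback only trylocks it, returning SHRINK_STOP rather than blocking a reclaim context behind a resize. A pthread sketch of that shrinker pattern; SHRINK_STOP, nr_to_scan and the cache itself are modelled loosely, and note that kernel mutex_trylock() returns 1 on success while pthread_mutex_trylock() returns 0, hence the inverted test.

#include <pthread.h>
#include <stdio.h>

#define SHRINK_STOP (~0UL)

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long max_nr_stripes = 256;
static unsigned long min_nr_stripes = 64;

static int drop_one_stripe(void)
{
	if (max_nr_stripes == 0)
		return 0;
	max_nr_stripes--;
	return 1;
}

/* Reclaim callback: never sleep on the resize lock, just report that no
 * progress could be made this time around. */
static unsigned long cache_scan(unsigned long nr_to_scan)
{
	unsigned long ret = SHRINK_STOP;

	if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
		ret = 0;
		while (ret < nr_to_scan && max_nr_stripes > min_nr_stripes) {
			if (!drop_one_stripe()) {
				ret = SHRINK_STOP;
				break;
			}
			ret++;
		}
		pthread_mutex_unlock(&cache_size_mutex);
	}
	return ret;
}

int main(void)
{
	unsigned long freed = cache_scan(32);

	printf("freed %lu stripes, %lu left\n", freed, max_nr_stripes);
	return 0;
}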
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..d05144278690 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
482 */ 482 */
483 int active_name; 483 int active_name;
484 char cache_name[2][32]; 484 char cache_name[2][32];
485 struct kmem_cache *slab_cache; /* for allocating stripes */ 485 struct kmem_cache *slab_cache; /* for allocating stripes */
486 struct mutex cache_size_mutex; /* Protect changes to cache size */
486 487
487 int seq_flush, seq_write; 488 int seq_flush, seq_write;
488 int quiesce; 489 int quiesce;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
240 240
241config DVB_TS2020 241config DVB_TS2020
242 tristate "Montage Tehnology TS2020 based tuners" 242 tristate "Montage Tehnology TS2020 based tuners"
243 depends on DVB_CORE 243 depends on DVB_CORE && I2C
244 select REGMAP_I2C 244 select REGMAP_I2C
245 default m if !MEDIA_SUBDRV_AUTOSELECT 245 default m if !MEDIA_SUBDRV_AUTOSELECT
246 help 246 help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
2 tristate "Cisco Cobalt support" 2 tristate "Cisco Cobalt support"
3 depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER 3 depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
4 depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB 4 depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
5 depends on SND
5 select I2C_ALGOBIT 6 select I2C_ALGOBIT
6 select VIDEO_ADV7604 7 select VIDEO_ADV7604
7 select VIDEO_ADV7511 8 select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
139 also know about dropped frames. */ 139 also know about dropped frames. */
140 cb->vb.v4l2_buf.sequence = s->sequence++; 140 cb->vb.v4l2_buf.sequence = s->sequence++;
141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ? 141 vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
142 VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE); 142 VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
143} 143}
144 144
145irqreturn_t cobalt_irq_handler(int irq, void *dev_id) 145irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */ 39 */
40 40
41#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
42
41#include <linux/module.h> 43#include <linux/module.h>
42#include <linux/kernel.h> 44#include <linux/kernel.h>
43#include <linux/fb.h> 45#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
1171{ 1173{
1172 int rc; 1174 int rc;
1173 1175
1176#ifdef CONFIG_X86_64
1177 if (pat_enabled()) {
1178 pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
1179 return -ENODEV;
1180 }
1181#endif
1182
1174 if (itv->osd_info) { 1183 if (itv->osd_info) {
1175 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); 1184 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
1176 return -EBUSY; 1185 return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
1265 int registered = 0; 1274 int registered = 0;
1266 int err; 1275 int err;
1267 1276
1268#ifdef CONFIG_X86_64
1269 if (WARN(pat_enabled(),
1270 "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
1271 return -ENODEV;
1272 }
1273#endif
1274 1277
1275 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1278 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
1276 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", 1279 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
130 130
131int mantis_dma_init(struct mantis_pci *mantis) 131int mantis_dma_init(struct mantis_pci *mantis)
132{ 132{
133 int err = 0; 133 int err;
134 134
135 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init"); 135 dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
136 if (mantis_alloc_buffers(mantis) < 0) { 136 err = mantis_alloc_buffers(mantis);
137 if (err < 0) {
137 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer"); 138 dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
138 139
139 /* Stop RISC Engine */ 140 /* Stop RISC Engine */
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
184 return -EINVAL; 184 return -EINVAL;
185} 185}
186 186
187static struct ir_raw_timings_manchester ir_rc5_timings = {
188 .leader = RC5_UNIT,
189 .pulse_space_start = 0,
190 .clock = RC5_UNIT,
191 .trailer_space = RC5_UNIT * 10,
192};
193
194static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
195 {
196 .leader = RC5_UNIT,
197 .pulse_space_start = 0,
198 .clock = RC5_UNIT,
199 .trailer_space = RC5X_SPACE,
200 },
201 {
202 .clock = RC5_UNIT,
203 .trailer_space = RC5_UNIT * 10,
204 },
205};
206
207static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
208 .leader = RC5_UNIT,
209 .pulse_space_start = 0,
210 .clock = RC5_UNIT,
211 .trailer_space = RC5_UNIT * 10,
212};
213
214static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
215 unsigned int important_bits)
216{
217 /* all important bits of scancode should be set in mask */
218 if (~scancode->mask & important_bits)
219 return -EINVAL;
220 /* extra bits in mask should be zero in data */
221 if (scancode->mask & scancode->data & ~important_bits)
222 return -EINVAL;
223 return 0;
224}
225
226/**
227 * ir_rc5_encode() - Encode a scancode as a stream of raw events
228 *
229 * @protocols: allowed protocols
230 * @scancode: scancode filter describing scancode (helps distinguish between
231 * protocol subtypes when scancode is ambiguous)
232 * @events: array of raw ir events to write into
233 * @max: maximum size of @events
234 *
235 * Returns: The number of events written.
236 * -ENOBUFS if there isn't enough space in the array to fit the
237 * encoding. In this case all @max events will have been written.
238 * -EINVAL if the scancode is ambiguous or invalid.
239 */
240static int ir_rc5_encode(u64 protocols,
241 const struct rc_scancode_filter *scancode,
242 struct ir_raw_event *events, unsigned int max)
243{
244 int ret;
245 struct ir_raw_event *e = events;
246 unsigned int data, xdata, command, commandx, system;
247
248 /* Detect protocol and convert scancode to raw data */
249 if (protocols & RC_BIT_RC5 &&
250 !ir_rc5_validate_filter(scancode, 0x1f7f)) {
251 /* decode scancode */
252 command = (scancode->data & 0x003f) >> 0;
253 commandx = (scancode->data & 0x0040) >> 6;
254 system = (scancode->data & 0x1f00) >> 8;
255 /* encode data */
256 data = !commandx << 12 | system << 6 | command;
257
258 /* Modulate the data */
259 ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
260 data);
261 if (ret < 0)
262 return ret;
263 } else if (protocols & RC_BIT_RC5X &&
264 !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
265 /* decode scancode */
266 xdata = (scancode->data & 0x00003f) >> 0;
267 command = (scancode->data & 0x003f00) >> 8;
268 commandx = (scancode->data & 0x004000) >> 14;
269 system = (scancode->data & 0x1f0000) >> 16;
270 /* commandx and system overlap, bits must match when encoded */
271 if (commandx == (system & 0x1))
272 return -EINVAL;
273 /* encode data */
274 data = 1 << 18 | system << 12 | command << 6 | xdata;
275
276 /* Modulate the data */
277 ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
278 CHECK_RC5X_NBITS,
279 data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
280 if (ret < 0)
281 return ret;
282 ret = ir_raw_gen_manchester(&e, max - (e - events),
283 &ir_rc5x_timings[1],
284 RC5X_NBITS - CHECK_RC5X_NBITS,
285 data);
286 if (ret < 0)
287 return ret;
288 } else if (protocols & RC_BIT_RC5_SZ &&
289 !ir_rc5_validate_filter(scancode, 0x2fff)) {
290 /* RC5-SZ scancode is raw enough for Manchester as it is */
291 ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
292 RC5_SZ_NBITS, scancode->data & 0x2fff);
293 if (ret < 0)
294 return ret;
295 } else {
296 return -EINVAL;
297 }
298
299 return e - events;
300}
301
302static struct ir_raw_handler rc5_handler = { 187static struct ir_raw_handler rc5_handler = {
303 .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ, 188 .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
304 .decode = ir_rc5_decode, 189 .decode = ir_rc5_decode,
305 .encode = ir_rc5_encode,
306}; 190};
307 191
308static int __init ir_rc5_decode_init(void) 192static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
291 return -EINVAL; 291 return -EINVAL;
292} 292}
293 293
294static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
295 {
296 .leader = RC6_PREFIX_PULSE,
297 .pulse_space_start = 0,
298 .clock = RC6_UNIT,
299 .invert = 1,
300 .trailer_space = RC6_PREFIX_SPACE,
301 },
302 {
303 .clock = RC6_UNIT,
304 .invert = 1,
305 },
306 {
307 .clock = RC6_UNIT * 2,
308 .invert = 1,
309 },
310 {
311 .clock = RC6_UNIT,
312 .invert = 1,
313 .trailer_space = RC6_SUFFIX_SPACE,
314 },
315};
316
317static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
318 unsigned int important_bits)
319{
320 /* all important bits of scancode should be set in mask */
321 if (~scancode->mask & important_bits)
322 return -EINVAL;
323 /* extra bits in mask should be zero in data */
324 if (scancode->mask & scancode->data & ~important_bits)
325 return -EINVAL;
326 return 0;
327}
328
329/**
330 * ir_rc6_encode() - Encode a scancode as a stream of raw events
331 *
332 * @protocols: allowed protocols
333 * @scancode: scancode filter describing scancode (helps distinguish between
334 * protocol subtypes when scancode is ambiguous)
335 * @events: array of raw ir events to write into
336 * @max: maximum size of @events
337 *
338 * Returns: The number of events written.
339 * -ENOBUFS if there isn't enough space in the array to fit the
340 * encoding. In this case all @max events will have been written.
341 * -EINVAL if the scancode is ambiguous or invalid.
342 */
343static int ir_rc6_encode(u64 protocols,
344 const struct rc_scancode_filter *scancode,
345 struct ir_raw_event *events, unsigned int max)
346{
347 int ret;
348 struct ir_raw_event *e = events;
349
350 if (protocols & RC_BIT_RC6_0 &&
351 !ir_rc6_validate_filter(scancode, 0xffff)) {
352
353 /* Modulate the preamble */
354 ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
355 if (ret < 0)
356 return ret;
357
358 /* Modulate the header (Start Bit & Mode-0) */
359 ret = ir_raw_gen_manchester(&e, max - (e - events),
360 &ir_rc6_timings[1],
361 RC6_HEADER_NBITS, (1 << 3));
362 if (ret < 0)
363 return ret;
364
365 /* Modulate Trailer Bit */
366 ret = ir_raw_gen_manchester(&e, max - (e - events),
367 &ir_rc6_timings[2], 1, 0);
368 if (ret < 0)
369 return ret;
370
371 /* Modulate rest of the data */
372 ret = ir_raw_gen_manchester(&e, max - (e - events),
373 &ir_rc6_timings[3], RC6_0_NBITS,
374 scancode->data);
375 if (ret < 0)
376 return ret;
377
378 } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
379 RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
380 !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
381
382 /* Modulate the preamble */
383 ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
384 if (ret < 0)
385 return ret;
386
387 /* Modulate the header (Start Bit & Header-version 6 */
388 ret = ir_raw_gen_manchester(&e, max - (e - events),
389 &ir_rc6_timings[1],
390 RC6_HEADER_NBITS, (1 << 3 | 6));
391 if (ret < 0)
392 return ret;
393
394 /* Modulate Trailer Bit */
395 ret = ir_raw_gen_manchester(&e, max - (e - events),
396 &ir_rc6_timings[2], 1, 0);
397 if (ret < 0)
398 return ret;
399
400 /* Modulate rest of the data */
401 ret = ir_raw_gen_manchester(&e, max - (e - events),
402 &ir_rc6_timings[3],
403 fls(scancode->mask),
404 scancode->data);
405 if (ret < 0)
406 return ret;
407
408 } else {
409 return -EINVAL;
410 }
411
412 return e - events;
413}
414
415static struct ir_raw_handler rc6_handler = { 294static struct ir_raw_handler rc6_handler = {
416 .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | 295 .protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
417 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 | 296 RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
418 RC_BIT_RC6_MCE, 297 RC_BIT_RC6_MCE,
419 .decode = ir_rc6_decode, 298 .decode = ir_rc6_decode,
420 .encode = ir_rc6_encode,
421}; 299};
422 300
423static int __init ir_rc6_decode_init(void) 301static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
526 return 0; 526 return 0;
527} 527}
528 528
529static int nvt_write_wakeup_codes(struct rc_dev *dev,
530 const u8 *wakeup_sample_buf, int count)
531{
532 int i = 0;
533 u8 reg, reg_learn_mode;
534 unsigned long flags;
535 struct nvt_dev *nvt = dev->priv;
536
537 nvt_dbg_wake("writing wakeup samples");
538
539 reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
540 reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
541 reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
542
543 /* Lock the learn area to prevent racing with wake-isr */
544 spin_lock_irqsave(&nvt->nvt_lock, flags);
545
546 /* Enable fifo writes */
547 nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
548
549 /* Clear cir wake rx fifo */
550 nvt_clear_cir_wake_fifo(nvt);
551
552 if (count > WAKE_FIFO_LEN) {
553 nvt_dbg_wake("HW FIFO too small for all wake samples");
554 count = WAKE_FIFO_LEN;
555 }
556
557 if (count)
558 pr_info("Wake samples (%d) =", count);
559 else
560 pr_info("Wake sample fifo cleared");
561
562 /* Write wake samples to fifo */
563 for (i = 0; i < count; i++) {
564 pr_cont(" %02x", wakeup_sample_buf[i]);
565 nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
566 CIR_WAKE_WR_FIFO_DATA);
567 }
568 pr_cont("\n");
569
570 /* Switch cir to wakeup mode and disable fifo writing */
571 nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
572
573 /* Set number of bytes needed for wake */
574 nvt_cir_wake_reg_write(nvt, count ? count :
575 CIR_WAKE_FIFO_CMP_BYTES,
576 CIR_WAKE_FIFO_CMP_DEEP);
577
578 spin_unlock_irqrestore(&nvt->nvt_lock, flags);
579
580 return 0;
581}
582
583static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
584 struct rc_scancode_filter *sc_filter)
585{
586 u8 *reg_buf;
587 u8 buf_val;
588 int i, ret, count;
589 unsigned int val;
590 struct ir_raw_event *raw;
591 bool complete;
592
593 /* Require both mask and data to be set before actually committing */
594 if (!sc_filter->mask || !sc_filter->data)
595 return 0;
596
597 raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
598 if (!raw)
599 return -ENOMEM;
600
601 ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
602 raw, WAKE_FIFO_LEN);
603 complete = (ret != -ENOBUFS);
604 if (!complete)
605 ret = WAKE_FIFO_LEN;
606 else if (ret < 0)
607 goto out_raw;
608
609 reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
610 if (!reg_buf) {
611 ret = -ENOMEM;
612 goto out_raw;
613 }
614
615 /* Inspect the ir samples */
616 for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
617 val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
618
619 /* Split too large values into several smaller ones */
620 while (val > 0 && count < WAKE_FIFO_LEN) {
621
622 /* Skip last value for better comparison tolerance */
623 if (complete && i == ret - 1 && val < BUF_LEN_MASK)
624 break;
625
626 /* Clamp values to BUF_LEN_MASK at most */
627 buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
628
629 reg_buf[count] = buf_val;
630 val -= buf_val;
631 if ((raw[i]).pulse)
632 reg_buf[count] |= BUF_PULSE_BIT;
633 count++;
634 }
635 }
636
637 ret = nvt_write_wakeup_codes(dev, reg_buf, count);
638
639 kfree(reg_buf);
640out_raw:
641 kfree(raw);
642
643 return ret;
644}
645
646/* Dummy implementation. nuvoton is agnostic to the protocol used */
647static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
648 u64 *rc_type)
649{
650 return 0;
651}
652
653/* 529/*
654 * nvt_tx_ir 530 * nvt_tx_ir
655 * 531 *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
1167 /* Set up the rc device */ 1043 /* Set up the rc device */
1168 rdev->priv = nvt; 1044 rdev->priv = nvt;
1169 rdev->driver_type = RC_DRIVER_IR_RAW; 1045 rdev->driver_type = RC_DRIVER_IR_RAW;
1170 rdev->encode_wakeup = true;
1171 rdev->allowed_protocols = RC_BIT_ALL; 1046 rdev->allowed_protocols = RC_BIT_ALL;
1172 rdev->open = nvt_open; 1047 rdev->open = nvt_open;
1173 rdev->close = nvt_close; 1048 rdev->close = nvt_close;
1174 rdev->tx_ir = nvt_tx_ir; 1049 rdev->tx_ir = nvt_tx_ir;
1175 rdev->s_tx_carrier = nvt_set_tx_carrier; 1050 rdev->s_tx_carrier = nvt_set_tx_carrier;
1176 rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
1177 rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
1178 rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; 1051 rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
1179 rdev->input_phys = "nuvoton/cir0"; 1052 rdev->input_phys = "nuvoton/cir0";
1180 rdev->input_id.bustype = BUS_HOST; 1053 rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
63 */ 63 */
64#define TX_BUF_LEN 256 64#define TX_BUF_LEN 256
65#define RX_BUF_LEN 32 65#define RX_BUF_LEN 32
66#define WAKE_FIFO_LEN 67
67 66
68struct nvt_dev { 67struct nvt_dev {
69 struct pnp_dev *pdev; 68 struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa2f2a7..b68d4f762734 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@ struct ir_raw_handler {
25 25
26 u64 protocols; /* which are handled by this handler */ 26 u64 protocols; /* which are handled by this handler */
27 int (*decode)(struct rc_dev *dev, struct ir_raw_event event); 27 int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
28 int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
29 struct ir_raw_event *events, unsigned int max);
30 28
31 /* These two should only be used by the lirc decoder */ 29 /* These two should only be used by the lirc decoder */
32 int (*raw_register)(struct rc_dev *dev); 30 int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
152#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000) 150#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
153#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space") 151#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
154 152
155/* functions for IR encoders */
156
157static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
158 unsigned int pulse,
159 u32 duration)
160{
161 init_ir_raw_event(ev);
162 ev->duration = duration;
163 ev->pulse = pulse;
164}
165
166/**
167 * struct ir_raw_timings_manchester - Manchester coding timings
168 * @leader: duration of leader pulse (if any) 0 if continuing
169 * existing signal (see @pulse_space_start)
170 * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
171 * @clock: duration of each pulse/space in ns
172 * @invert: if set clock logic is inverted
173 * (0 = space + pulse, 1 = pulse + space)
174 * @trailer_space: duration of trailer space in ns
175 */
176struct ir_raw_timings_manchester {
177 unsigned int leader;
178 unsigned int pulse_space_start:1;
179 unsigned int clock;
180 unsigned int invert:1;
181 unsigned int trailer_space;
182};
183
184int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
185 const struct ir_raw_timings_manchester *timings,
186 unsigned int n, unsigned int data);
187
188/* 153/*
189 * Routines from rc-raw.c to be used internally and by decoders 154 * Routines from rc-raw.c to be used internally and by decoders
190 */ 155 */
191u64 ir_raw_get_allowed_protocols(void); 156u64 ir_raw_get_allowed_protocols(void);
192u64 ir_raw_get_encode_protocols(void);
193int ir_raw_event_register(struct rc_dev *dev); 157int ir_raw_event_register(struct rc_dev *dev);
194void ir_raw_event_unregister(struct rc_dev *dev); 158void ir_raw_event_unregister(struct rc_dev *dev);
195int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler); 159int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645c731c..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
30static DEFINE_MUTEX(ir_raw_handler_lock); 30static DEFINE_MUTEX(ir_raw_handler_lock);
31static LIST_HEAD(ir_raw_handler_list); 31static LIST_HEAD(ir_raw_handler_list);
32static u64 available_protocols; 32static u64 available_protocols;
33static u64 encode_protocols;
34 33
35static int ir_raw_event_thread(void *data) 34static int ir_raw_event_thread(void *data)
36{ 35{
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
241 return protocols; 240 return protocols;
242} 241}
243 242
244/* used internally by the sysfs interface */
245u64
246ir_raw_get_encode_protocols(void)
247{
248 u64 protocols;
249
250 mutex_lock(&ir_raw_handler_lock);
251 protocols = encode_protocols;
252 mutex_unlock(&ir_raw_handler_lock);
253 return protocols;
254}
255
256static int change_protocol(struct rc_dev *dev, u64 *rc_type) 243static int change_protocol(struct rc_dev *dev, u64 *rc_type)
257{ 244{
258 /* the caller will update dev->enabled_protocols */ 245 /* the caller will update dev->enabled_protocols */
259 return 0; 246 return 0;
260} 247}
261 248
262/**
263 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
264 * @ev: Pointer to pointer to next free event. *@ev is incremented for
265 * each raw event filled.
266 * @max: Maximum number of raw events to fill.
267 * @timings: Manchester modulation timings.
268 * @n: Number of bits of data.
269 * @data: Data bits to encode.
270 *
271 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
272 * modulation with the timing characteristics described by @timings, writing up
273 * to @max raw IR events using the *@ev pointer.
274 *
275 * Returns: 0 on success.
276 * -ENOBUFS if there isn't enough space in the array to fit the
277 * full encoded data. In this case all @max events will have been
278 * written.
279 */
280int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
281 const struct ir_raw_timings_manchester *timings,
282 unsigned int n, unsigned int data)
283{
284 bool need_pulse;
285 unsigned int i;
286 int ret = -ENOBUFS;
287
288 i = 1 << (n - 1);
289
290 if (timings->leader) {
291 if (!max--)
292 return ret;
293 if (timings->pulse_space_start) {
294 init_ir_raw_event_duration((*ev)++, 1, timings->leader);
295
296 if (!max--)
297 return ret;
298 init_ir_raw_event_duration((*ev), 0, timings->leader);
299 } else {
300 init_ir_raw_event_duration((*ev), 1, timings->leader);
301 }
302 i >>= 1;
303 } else {
304 /* continue existing signal */
305 --(*ev);
306 }
307 /* from here on *ev will point to the last event rather than the next */
308
309 while (n && i > 0) {
310 need_pulse = !(data & i);
311 if (timings->invert)
312 need_pulse = !need_pulse;
313 if (need_pulse == !!(*ev)->pulse) {
314 (*ev)->duration += timings->clock;
315 } else {
316 if (!max--)
317 goto nobufs;
318 init_ir_raw_event_duration(++(*ev), need_pulse,
319 timings->clock);
320 }
321
322 if (!max--)
323 goto nobufs;
324 init_ir_raw_event_duration(++(*ev), !need_pulse,
325 timings->clock);
326 i >>= 1;
327 }
328
329 if (timings->trailer_space) {
330 if (!(*ev)->pulse)
331 (*ev)->duration += timings->trailer_space;
332 else if (!max--)
333 goto nobufs;
334 else
335 init_ir_raw_event_duration(++(*ev), 0,
336 timings->trailer_space);
337 }
338
339 ret = 0;
340nobufs:
341 /* point to the next event rather than last event before returning */
342 ++(*ev);
343 return ret;
344}
345EXPORT_SYMBOL(ir_raw_gen_manchester);
346
347/**
348 * ir_raw_encode_scancode() - Encode a scancode as raw events
349 *
350 * @protocols: permitted protocols
351 * @scancode: scancode filter describing a single scancode
352 * @events: array of raw events to write into
353 * @max: max number of raw events
354 *
355 * Attempts to encode the scancode as raw events.
356 *
357 * Returns: The number of events written.
358 * -ENOBUFS if there isn't enough space in the array to fit the
359 * encoding. In this case all @max events will have been written.
360 * -EINVAL if the scancode is ambiguous or invalid, or if no
361 * compatible encoder was found.
362 */
363int ir_raw_encode_scancode(u64 protocols,
364 const struct rc_scancode_filter *scancode,
365 struct ir_raw_event *events, unsigned int max)
366{
367 struct ir_raw_handler *handler;
368 int ret = -EINVAL;
369
370 mutex_lock(&ir_raw_handler_lock);
371 list_for_each_entry(handler, &ir_raw_handler_list, list) {
372 if (handler->protocols & protocols && handler->encode) {
373 ret = handler->encode(protocols, scancode, events, max);
374 if (ret >= 0 || ret == -ENOBUFS)
375 break;
376 }
377 }
378 mutex_unlock(&ir_raw_handler_lock);
379
380 return ret;
381}
382EXPORT_SYMBOL(ir_raw_encode_scancode);
383
384/* 249/*
385 * Used to (un)register raw event clients 250 * Used to (un)register raw event clients
386 */ 251 */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
 	list_for_each_entry(raw, &ir_raw_client_list, list)
 		ir_raw_handler->raw_register(raw->dev);
 	available_protocols |= ir_raw_handler->protocols;
-	if (ir_raw_handler->encode)
-		encode_protocols |= ir_raw_handler->protocols;
 	mutex_unlock(&ir_raw_handler_lock);
 
 	return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
 	list_for_each_entry(raw, &ir_raw_client_list, list)
 		ir_raw_handler->raw_unregister(raw->dev);
 	available_protocols &= ~ir_raw_handler->protocols;
-	if (ir_raw_handler->encode)
-		encode_protocols &= ~ir_raw_handler->protocols;
 	mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
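Note: the rc-ir-raw.c hunks above revert the in-kernel Manchester (bi-phase) encoder along with the rest of the wakeup-filter encoding support. As a rough standalone sketch of the scheme that helper implemented (the polarity and buffer layout here are simplified assumptions, not the rc-core API):

/*
 * Simplified userspace illustration of bi-phase (Manchester) encoding,
 * loosely modelled on the removed ir_raw_gen_manchester() helper.
 * Symbols: 1 = pulse half-bit, 0 = space half-bit; n must be >= 1.
 * One common polarity convention is assumed; the kernel helper's actual
 * polarity depended on timings->invert.
 */
#include <stdio.h>

static int manchester_encode(unsigned int data, unsigned int n,
                             unsigned char *out, unsigned int max)
{
        unsigned int i, len = 0;

        for (i = 1u << (n - 1); i; i >>= 1) {
                if (len + 2 > max)
                        return -1;                      /* not enough room */
                out[len++] = (data & i) ? 0 : 1;        /* first half-bit */
                out[len++] = (data & i) ? 1 : 0;        /* second half-bit */
        }
        return (int)len;
}

int main(void)
{
        unsigned char buf[32];
        int i, len = manchester_encode(0x5, 4, buf, sizeof(buf));

        for (i = 0; i < len; i++)
                printf("%d", buf[i]);
        printf("\n");   /* 0x5 = 0101 -> 10 01 10 01 */
        return 0;
}

Each data bit occupies two half-bit slots, so at least 2*n output entries are needed; the removed kernel helper additionally merged adjacent identical symbols and handled leader/trailer timings.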
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63ce985..63dace8198b0 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <media/rc-core.h> 29#include <media/rc-core.h>
31 30
32#define DRIVER_NAME "rc-loopback" 31#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
177 return 0; 176 return 0;
178} 177}
179 178
180static int loop_set_wakeup_filter(struct rc_dev *dev,
181 struct rc_scancode_filter *sc_filter)
182{
183 static const unsigned int max = 512;
184 struct ir_raw_event *raw;
185 int ret;
186 int i;
187
188 /* fine to disable filter */
189 if (!sc_filter->mask)
190 return 0;
191
192 /* encode the specified filter and loop it back */
193 raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
194 ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
195 raw, max);
196 /* still loop back the partial raw IR even if it's incomplete */
197 if (ret == -ENOBUFS)
198 ret = max;
199 if (ret >= 0) {
200 /* do the loopback */
201 for (i = 0; i < ret; ++i)
202 ir_raw_event_store(dev, &raw[i]);
203 ir_raw_event_handle(dev);
204
205 ret = 0;
206 }
207
208 kfree(raw);
209
210 return ret;
211}
212
213static int __init loop_init(void) 179static int __init loop_init(void)
214{ 180{
215 struct rc_dev *rc; 181 struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
229 rc->map_name = RC_MAP_EMPTY; 195 rc->map_name = RC_MAP_EMPTY;
230 rc->priv = &loopdev; 196 rc->priv = &loopdev;
231 rc->driver_type = RC_DRIVER_IR_RAW; 197 rc->driver_type = RC_DRIVER_IR_RAW;
232 rc->encode_wakeup = true;
233 rc->allowed_protocols = RC_BIT_ALL; 198 rc->allowed_protocols = RC_BIT_ALL;
234 rc->timeout = 100 * 1000 * 1000; /* 100 ms */ 199 rc->timeout = 100 * 1000 * 1000; /* 100 ms */
235 rc->min_timeout = 1; 200 rc->min_timeout = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
244 rc->s_idle = loop_set_idle; 209 rc->s_idle = loop_set_idle;
245 rc->s_learning_mode = loop_set_learning_mode; 210 rc->s_learning_mode = loop_set_learning_mode;
246 rc->s_carrier_report = loop_set_carrier_report; 211 rc->s_carrier_report = loop_set_carrier_report;
247 rc->s_wakeup_filter = loop_set_wakeup_filter;
248 212
249 loopdev.txmask = RXMASK_REGULAR; 213 loopdev.txmask = RXMASK_REGULAR;
250 loopdev.txcarrier = 36000; 214 loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db65280..0ff388a16168 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
865 } else { 865 } else {
866 enabled = dev->enabled_wakeup_protocols; 866 enabled = dev->enabled_wakeup_protocols;
867 allowed = dev->allowed_wakeup_protocols; 867 allowed = dev->allowed_wakeup_protocols;
868 if (dev->encode_wakeup && !allowed)
869 allowed = ir_raw_get_encode_protocols();
870 } 868 }
871 869
872 mutex_unlock(&dev->lock); 870 mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
1408 path ? path : "N/A"); 1406 path ? path : "N/A");
1409 kfree(path); 1407 kfree(path);
1410 1408
1411 if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) { 1409 if (dev->driver_type == RC_DRIVER_IR_RAW) {
1412 /* Load raw decoders, if they aren't already */ 1410 /* Load raw decoders, if they aren't already */
1413 if (!raw_init) { 1411 if (!raw_init) {
1414 IR_dprintk(1, "Loading raw decoders\n"); 1412 IR_dprintk(1, "Loading raw decoders\n");
1415 ir_raw_init(); 1413 ir_raw_init();
1416 raw_init = true; 1414 raw_init = true;
1417 } 1415 }
1418 }
1419
1420 if (dev->driver_type == RC_DRIVER_IR_RAW) {
1421 /* calls ir_register_device so unlock mutex here*/ 1416 /* calls ir_register_device so unlock mutex here*/
1422 mutex_unlock(&dev->lock); 1417 mutex_unlock(&dev->lock);
1423 rc = ir_raw_event_register(dev); 1418 rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b315459098..a14c428f70e9 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
 		break;
 	case VB2_BUF_STATE_PREPARING:
 	case VB2_BUF_STATE_DEQUEUED:
+	case VB2_BUF_STATE_REQUEUEING:
 		/* nothing */
 		break;
 	}
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 
 	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
 		    state != VB2_BUF_STATE_ERROR &&
-		    state != VB2_BUF_STATE_QUEUED))
+		    state != VB2_BUF_STATE_QUEUED &&
+		    state != VB2_BUF_STATE_REQUEUEING))
 		state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 	for (plane = 0; plane < vb->num_planes; ++plane)
 		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
 
-	/* Add the buffer to the done buffers list */
 	spin_lock_irqsave(&q->done_lock, flags);
-	vb->state = state;
-	if (state != VB2_BUF_STATE_QUEUED)
+	if (state == VB2_BUF_STATE_QUEUED ||
+	    state == VB2_BUF_STATE_REQUEUEING) {
+		vb->state = VB2_BUF_STATE_QUEUED;
+	} else {
+		/* Add the buffer to the done buffers list */
 		list_add_tail(&vb->done_entry, &q->done_list);
+		vb->state = state;
+	}
 	atomic_dec(&q->owned_by_drv_count);
 	spin_unlock_irqrestore(&q->done_lock, flags);
 
-	if (state == VB2_BUF_STATE_QUEUED) {
+	switch (state) {
+	case VB2_BUF_STATE_QUEUED:
+		return;
+	case VB2_BUF_STATE_REQUEUEING:
 		if (q->start_streaming_called)
 			__enqueue_in_driver(vb);
 		return;
+	default:
+		/* Inform any processes that may be waiting for buffers */
+		wake_up(&q->done_wq);
+		break;
 	}
-
-	/* Inform any processes that may be waiting for buffers */
-	wake_up(&q->done_wq);
 }
 EXPORT_SYMBOL_GPL(vb2_buffer_done);
 
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
 
 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
 {
-	static bool __check_once __read_mostly;
+	static bool check_once;
 
-	if (__check_once)
+	if (check_once)
 		return;
 
-	__check_once = true;
-	__WARN();
+	check_once = true;
+	WARN_ON(1);
 
-	pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
 	if (vb->vb2_queue->allow_zero_bytesused)
-		pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
 	else
-		pr_warn_once("use the actual size instead.\n");
+		pr_warn("use the actual size instead.\n");
 }
 
 /**
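Note: the videobuf2 hunks above add a VB2_BUF_STATE_REQUEUEING completion state that hands a buffer straight back to the driver queue instead of waking userspace. A minimal, hypothetical driver-side sketch of how it would be used (my_dev and my_hw_frame_broken() are placeholders, not part of this patch set):

/* Fragment of a capture driver's frame-completion path (hedged sketch). */
static void my_dev_finish_frame(struct my_dev *dev, struct vb2_buffer *vb)
{
	if (my_hw_frame_broken(dev)) {
		/*
		 * Hand the buffer back to the driver: with REQUEUEING, vb2
		 * re-queues it via __enqueue_in_driver() rather than
		 * delivering a bad frame to userspace.
		 */
		vb2_buffer_done(vb, VB2_BUF_STATE_REQUEUEING);
		return;
	}

	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);	/* normal completion */
}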
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 8911e51d410a..9426276dbe14 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2074,14 +2074,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
 			ret = gpmc_probe_nand_child(pdev, child);
 		else if (of_node_cmp(child->name, "onenand") == 0)
 			ret = gpmc_probe_onenand_child(pdev, child);
-		else if (of_node_cmp(child->name, "ethernet") == 0 ||
-			 of_node_cmp(child->name, "nor") == 0 ||
-			 of_node_cmp(child->name, "uart") == 0)
+		else
 			ret = gpmc_probe_generic_child(pdev, child);
-
-		if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
-			 __func__, child->full_name))
-			of_node_put(child);
 	}
 
 	return 0;
@@ -2251,6 +2245,9 @@ void omap3_gpmc_save_context(void)
2251{ 2245{
2252 int i; 2246 int i;
2253 2247
2248 if (!gpmc_base)
2249 return;
2250
2254 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); 2251 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2255 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); 2252 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2256 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); 2253 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2283,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
2283{ 2280{
2284 int i; 2281 int i;
2285 2282
2283 if (!gpmc_base)
2284 return;
2285
2286 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); 2286 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2287 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); 2287 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2288 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); 2288 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 653815950aa2..3f68dd251ce8 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
 
 config MFD_CROS_EC_SPI
 	tristate "ChromeOS Embedded Controller (SPI)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+	depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
 
 	---help---
 	  If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..0ce20ce170c4 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -392,7 +392,7 @@ err:
392 * Register patch to some of the CODECs internal write sequences 392 * Register patch to some of the CODECs internal write sequences
393 * to ensure a clean exit from the low power sleep state. 393 * to ensure a clean exit from the low power sleep state.
394 */ 394 */
395static const struct reg_default wm5110_sleep_patch[] = { 395static const struct reg_sequence wm5110_sleep_patch[] = {
396 { 0x337A, 0xC100 }, 396 { 0x337A, 0xC100 },
397 { 0x337B, 0x0041 }, 397 { 0x337B, 0x0041 },
398 { 0x3300, 0xA210 }, 398 { 0x3300, 0xA210 },
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
651 651
652 arizona->has_fully_powered_off = true; 652 arizona->has_fully_powered_off = true;
653 653
654 disable_irq(arizona->irq); 654 disable_irq_nosync(arizona->irq);
655 arizona_enable_reset(arizona); 655 arizona_enable_reset(arizona);
656 regulator_bulk_disable(arizona->num_core_supplies, 656 regulator_bulk_disable(arizona->num_core_supplies,
657 arizona->core_supplies); 657 arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
1141 arizona->pdata.gpio_defaults[i]); 1141 arizona->pdata.gpio_defaults[i]);
1142 } 1142 }
1143 1143
1144 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1145 pm_runtime_use_autosuspend(arizona->dev);
1146 pm_runtime_enable(arizona->dev);
1147
1148 /* Chip default */ 1144 /* Chip default */
1149 if (!arizona->pdata.clk32k_src) 1145 if (!arizona->pdata.clk32k_src)
1150 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; 1146 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
1245 arizona->pdata.spk_fmt[i]); 1241 arizona->pdata.spk_fmt[i]);
1246 } 1242 }
1247 1243
1244 pm_runtime_set_active(arizona->dev);
1245 pm_runtime_enable(arizona->dev);
1246
1248 /* Set up for interrupts */ 1247 /* Set up for interrupts */
1249 ret = arizona_irq_init(arizona); 1248 ret = arizona_irq_init(arizona);
1250 if (ret != 0) 1249 if (ret != 0)
1251 goto err_reset; 1250 goto err_reset;
1252 1251
1252 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1253 pm_runtime_use_autosuspend(arizona->dev);
1254
1253 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", 1255 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
1254 arizona_clkgen_err, arizona); 1256 arizona_clkgen_err, arizona);
1255 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", 1257 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
1278 goto err_irq; 1280 goto err_irq;
1279 } 1281 }
1280 1282
1281#ifdef CONFIG_PM
1282 regulator_disable(arizona->dcvdd);
1283#endif
1284
1285 return 0; 1283 return 0;
1286 1284
1287err_irq: 1285err_irq:
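Note: the arizona-core.c hunks above reorder runtime-PM bring-up so the device is marked active and runtime PM is enabled before IRQ registration, with autosuspend opted into afterwards, and drop the unconditional DCVDD disable. A hedged sketch of that ordering in a generic probe path (foo_probe_pm is illustrative only, not arizona code):

/* Runtime-PM setup order during probe (hedged sketch). */
static int foo_probe_pm(struct device *dev)
{
	pm_runtime_set_active(dev);	/* hardware is already powered up */
	pm_runtime_enable(dev);		/* from here on, usage counts apply */

	/* ... request IRQs / register children that may use runtime PM ... */

	pm_runtime_set_autosuspend_delay(dev, 100);	/* milliseconds */
	pm_runtime_use_autosuspend(dev);
	return 0;
}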
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 5c054031c3f8..e14c8c9d189b 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * License Terms: GNU General Public License, version 2 7 * License Terms: GNU General Public License, version 2
8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson 8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
9 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics 9 * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
10 */ 10 */
11 11
12#include <linux/i2c.h> 12#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index a81badbaa917..6fdb30e84a2b 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
4 * Copyright (C) ST Microelectronics SA 2011 4 * Copyright (C) ST Microelectronics SA 2011
5 * 5 *
6 * License Terms: GNU General Public License, version 2 6 * License Terms: GNU General Public License, version 2
7 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics 7 * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
8 */ 8 */
9 9
10#include <linux/spi/spi.h> 10#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
146 146
147MODULE_LICENSE("GPL v2"); 147MODULE_LICENSE("GPL v2");
148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); 148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
149MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 149MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c5265c1262c5..583dc33432f3 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -86,7 +86,7 @@ static const struct reg_default twl6040_defaults[] = {
86 { 0x2E, 0x00 }, /* REG_STATUS (ro) */ 86 { 0x2E, 0x00 }, /* REG_STATUS (ro) */
87}; 87};
88 88
89static struct reg_default twl6040_patch[] = { 89static struct reg_sequence twl6040_patch[] = {
90 /* 90 /*
91 * Select I2C bus access to dual access registers 91 * Select I2C bus access to dual access registers
92 * Interrupt register is cleared on read 92 * Interrupt register is cleared on read
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index aeae6ec123b3..423fb3730dc7 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -21,7 +21,7 @@
21#define WM5102_NUM_AOD_ISR 2 21#define WM5102_NUM_AOD_ISR 2
22#define WM5102_NUM_ISR 5 22#define WM5102_NUM_ISR 5
23 23
24static const struct reg_default wm5102_reva_patch[] = { 24static const struct reg_sequence wm5102_reva_patch[] = {
25 { 0x80, 0x0003 }, 25 { 0x80, 0x0003 },
26 { 0x221, 0x0090 }, 26 { 0x221, 0x0090 },
27 { 0x211, 0x0014 }, 27 { 0x211, 0x0014 },
@@ -57,7 +57,7 @@ static const struct reg_default wm5102_reva_patch[] = {
57 { 0x80, 0x0000 }, 57 { 0x80, 0x0000 },
58}; 58};
59 59
60static const struct reg_default wm5102_revb_patch[] = { 60static const struct reg_sequence wm5102_revb_patch[] = {
61 { 0x19, 0x0001 }, 61 { 0x19, 0x0001 },
62 { 0x80, 0x0003 }, 62 { 0x80, 0x0003 },
63 { 0x081, 0xE022 }, 63 { 0x081, 0xE022 },
@@ -80,7 +80,7 @@ static const struct reg_default wm5102_revb_patch[] = {
80/* We use a function so we can use ARRAY_SIZE() */ 80/* We use a function so we can use ARRAY_SIZE() */
81int wm5102_patch(struct arizona *arizona) 81int wm5102_patch(struct arizona *arizona)
82{ 82{
83 const struct reg_default *wm5102_patch; 83 const struct reg_sequence *wm5102_patch;
84 int patch_size; 84 int patch_size;
85 85
86 switch (arizona->rev) { 86 switch (arizona->rev) {
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 12cad94b4035..26ce14f903fe 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -21,7 +21,7 @@
21#define WM5110_NUM_AOD_ISR 2 21#define WM5110_NUM_AOD_ISR 2
22#define WM5110_NUM_ISR 5 22#define WM5110_NUM_ISR 5
23 23
24static const struct reg_default wm5110_reva_patch[] = { 24static const struct reg_sequence wm5110_reva_patch[] = {
25 { 0x80, 0x3 }, 25 { 0x80, 0x3 },
26 { 0x44, 0x20 }, 26 { 0x44, 0x20 },
27 { 0x45, 0x40 }, 27 { 0x45, 0x40 },
@@ -134,7 +134,7 @@ static const struct reg_default wm5110_reva_patch[] = {
134 { 0x209, 0x002A }, 134 { 0x209, 0x002A },
135}; 135};
136 136
137static const struct reg_default wm5110_revb_patch[] = { 137static const struct reg_sequence wm5110_revb_patch[] = {
138 { 0x80, 0x3 }, 138 { 0x80, 0x3 },
139 { 0x36e, 0x0210 }, 139 { 0x36e, 0x0210 },
140 { 0x370, 0x0210 }, 140 { 0x370, 0x0210 },
@@ -224,7 +224,7 @@ static const struct reg_default wm5110_revb_patch[] = {
224 { 0x80, 0x0 }, 224 { 0x80, 0x0 },
225}; 225};
226 226
227static const struct reg_default wm5110_revd_patch[] = { 227static const struct reg_sequence wm5110_revd_patch[] = {
228 { 0x80, 0x3 }, 228 { 0x80, 0x3 },
229 { 0x80, 0x3 }, 229 { 0x80, 0x3 },
230 { 0x393, 0x27 }, 230 { 0x393, 0x27 },
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 53ae5af5d6e4..0f4169a3a5d4 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -243,21 +243,21 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
243} 243}
244#endif 244#endif
245 245
246static const struct reg_default wm8994_revc_patch[] = { 246static const struct reg_sequence wm8994_revc_patch[] = {
247 { 0x102, 0x3 }, 247 { 0x102, 0x3 },
248 { 0x56, 0x3 }, 248 { 0x56, 0x3 },
249 { 0x817, 0x0 }, 249 { 0x817, 0x0 },
250 { 0x102, 0x0 }, 250 { 0x102, 0x0 },
251}; 251};
252 252
253static const struct reg_default wm8958_reva_patch[] = { 253static const struct reg_sequence wm8958_reva_patch[] = {
254 { 0x102, 0x3 }, 254 { 0x102, 0x3 },
255 { 0xcb, 0x81 }, 255 { 0xcb, 0x81 },
256 { 0x817, 0x0 }, 256 { 0x817, 0x0 },
257 { 0x102, 0x0 }, 257 { 0x102, 0x0 },
258}; 258};
259 259
260static const struct reg_default wm1811_reva_patch[] = { 260static const struct reg_sequence wm1811_reva_patch[] = {
261 { 0x102, 0x3 }, 261 { 0x102, 0x3 },
262 { 0x56, 0xc07 }, 262 { 0x56, 0xc07 },
263 { 0x5d, 0x7e }, 263 { 0x5d, 0x7e },
@@ -326,7 +326,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
326{ 326{
327 struct wm8994_pdata *pdata; 327 struct wm8994_pdata *pdata;
328 struct regmap_config *regmap_config; 328 struct regmap_config *regmap_config;
329 const struct reg_default *regmap_patch = NULL; 329 const struct reg_sequence *regmap_patch = NULL;
330 const char *devname; 330 const char *devname;
331 int ret, i, patch_regs = 0; 331 int ret, i, patch_regs = 0;
332 int pulls = 0; 332 int pulls = 0;
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index c0c25d75aacc..cab2c68f1737 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -17,7 +17,7 @@
17 17
18#include "arizona.h" 18#include "arizona.h"
19 19
20static const struct reg_default wm8997_reva_patch[] = { 20static const struct reg_sequence wm8997_reva_patch[] = {
21 { 0x80, 0x0003 }, 21 { 0x80, 0x0003 },
22 { 0x214, 0x0008 }, 22 { 0x214, 0x0008 },
23 { 0x458, 0x0000 }, 23 { 0x458, 0x0000 },
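Note: the reg_default -> reg_sequence conversions in the MFD tables above track the regmap change that gives register patches their own type, distinct from cache defaults. A minimal sketch of the resulting API use (codec_patch and the register values are made up for illustration):

/* Applying a register patch with the reg_sequence type (hedged sketch). */
static const struct reg_sequence codec_patch[] = {
	{ 0x80,  0x0003 },
	{ 0x221, 0x0090 },
	{ 0x80,  0x0000 },
};

static int codec_apply_patch(struct regmap *regmap)
{
	return regmap_register_patch(regmap, codec_patch,
				     ARRAY_SIZE(codec_patch));
}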
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 0c77240ae2fc..729e0851167d 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -23,6 +23,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
23 23
24 afu = cxl_pci_to_afu(dev); 24 afu = cxl_pci_to_afu(dev);
25 25
26 get_device(&afu->dev);
26 ctx = cxl_context_alloc(); 27 ctx = cxl_context_alloc();
27 if (IS_ERR(ctx)) 28 if (IS_ERR(ctx))
28 return ctx; 29 return ctx;
@@ -31,6 +32,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
31 rc = cxl_context_init(ctx, afu, false, NULL); 32 rc = cxl_context_init(ctx, afu, false, NULL);
32 if (rc) { 33 if (rc) {
33 kfree(ctx); 34 kfree(ctx);
35 put_device(&afu->dev);
34 return ERR_PTR(-ENOMEM); 36 return ERR_PTR(-ENOMEM);
35 } 37 }
36 cxl_assign_psn_space(ctx); 38 cxl_assign_psn_space(ctx);
@@ -60,6 +62,8 @@ int cxl_release_context(struct cxl_context *ctx)
60 if (ctx->status != CLOSED) 62 if (ctx->status != CLOSED)
61 return -EBUSY; 63 return -EBUSY;
62 64
65 put_device(&ctx->afu->dev);
66
63 cxl_context_free(ctx); 67 cxl_context_free(ctx);
64 68
65 return 0; 69 return 0;
@@ -159,7 +163,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
159 } 163 }
160 164
161 ctx->status = STARTED; 165 ctx->status = STARTED;
162 get_device(&ctx->afu->dev);
163out: 166out:
164 mutex_unlock(&ctx->status_mutex); 167 mutex_unlock(&ctx->status_mutex);
165 return rc; 168 return rc;
@@ -175,12 +178,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element);
175/* Stop a context. Returns 0 on success, otherwise -Errno */ 178/* Stop a context. Returns 0 on success, otherwise -Errno */
176int cxl_stop_context(struct cxl_context *ctx) 179int cxl_stop_context(struct cxl_context *ctx)
177{ 180{
178 int rc; 181 return __detach_context(ctx);
179
180 rc = __detach_context(ctx);
181 if (!rc)
182 put_device(&ctx->afu->dev);
183 return rc;
184} 182}
185EXPORT_SYMBOL_GPL(cxl_stop_context); 183EXPORT_SYMBOL_GPL(cxl_stop_context);
186 184
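Note: the cxl/api.c hunk above moves the AFU device reference so it is taken when a context is created and dropped when the context is released, instead of being tied to start/stop. A generic sketch of that lifetime rule (my_ctx is a placeholder type, not the cxl API):

/* Pin the backing device for as long as the context object exists. */
struct my_ctx {
	struct device *dev;
};

static struct my_ctx *my_ctx_create(struct device *dev)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->dev = get_device(dev);	/* reference held for ctx lifetime */
	return ctx;
}

static void my_ctx_destroy(struct my_ctx *ctx)
{
	put_device(ctx->dev);		/* paired with get_device() above */
	kfree(ctx);
}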
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 2a4c80ac322a..1287148629c0 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
 		area = ctx->afu->psn_phys;
-		if (offset > ctx->afu->adapter->ps_size)
+		if (offset >= ctx->afu->adapter->ps_size)
 			return VM_FAULT_SIGBUS;
 	} else {
 		area = ctx->psn_phys;
-		if (offset > ctx->psn_size)
+		if (offset >= ctx->psn_size)
 			return VM_FAULT_SIGBUS;
 	}
 
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
145 */ 145 */
146int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) 146int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
147{ 147{
148 u64 start = vma->vm_pgoff << PAGE_SHIFT;
148 u64 len = vma->vm_end - vma->vm_start; 149 u64 len = vma->vm_end - vma->vm_start;
149 len = min(len, ctx->psn_size); 150
151 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
152 if (start + len > ctx->afu->adapter->ps_size)
153 return -EINVAL;
154 } else {
155 if (start + len > ctx->psn_size)
156 return -EINVAL;
157 }
150 158
151 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { 159 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
152 /* make sure there is a valid per process space for this AFU */ 160 /* make sure there is a valid per process space for this AFU */
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 833348e2c9cb..4a164ab8b35a 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
73 spin_lock(&adapter->afu_list_lock); 73 spin_lock(&adapter->afu_list_lock);
74 for (slice = 0; slice < adapter->slices; slice++) { 74 for (slice = 0; slice < adapter->slices; slice++) {
75 afu = adapter->afu[slice]; 75 afu = adapter->afu[slice];
76 if (!afu->enabled) 76 if (!afu || !afu->enabled)
77 continue; 77 continue;
78 rcu_read_lock(); 78 rcu_read_lock();
79 idr_for_each_entry(&afu->contexts_idr, ctx, id) 79 idr_for_each_entry(&afu->contexts_idr, ctx, id)
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index c68ef5806dbe..32ad09705949 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -539,7 +539,7 @@ err:
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p1n_mmio)
+	if (afu->p2n_mmio)
 		iounmap(afu->p2n_mmio);
 	if (afu->p1n_mmio)
 		iounmap(afu->p1n_mmio);
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index b1d1983a84a5..2eba002b580b 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
 	unsigned long addr;
 
 	phb = pci_bus_to_host(bus);
-	afu = (struct cxl_afu *)phb->private_data;
 	if (phb == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
+	afu = (struct cxl_afu *)phb->private_data;
+
 	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	if (offset >= (unsigned long)phb->cfg_data)
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2d3db81be099..6ded3dc36644 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
438{ 438{
439 struct at24_data *at24; 439 struct at24_data *at24;
440 440
441 if (unlikely(off >= attr->size))
442 return -EFBIG;
443
444 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); 441 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
445 return at24_write(at24, buf, off, count); 442 return at24_write(at24, buf, off, count);
446} 443}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 357b6ae4d207..458aa5a09c52 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -552,22 +552,6 @@ void mei_cl_bus_rx_event(struct mei_cl *cl)
552 schedule_work(&device->event_work); 552 schedule_work(&device->event_work);
553} 553}
554 554
555void mei_cl_bus_remove_devices(struct mei_device *dev)
556{
557 struct mei_cl *cl, *next;
558
559 mutex_lock(&dev->device_lock);
560 list_for_each_entry_safe(cl, next, &dev->device_list, device_link) {
561 if (cl->device)
562 mei_cl_remove_device(cl->device);
563
564 list_del(&cl->device_link);
565 mei_cl_unlink(cl);
566 kfree(cl);
567 }
568 mutex_unlock(&dev->device_lock);
569}
570
571int __init mei_cl_bus_init(void) 555int __init mei_cl_bus_init(void)
572{ 556{
573 return bus_register(&mei_cl_bus_type); 557 return bus_register(&mei_cl_bus_type);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 94514b2c7a50..00c3865ca3b1 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -333,8 +333,6 @@ void mei_stop(struct mei_device *dev)
333 333
334 mei_nfc_host_exit(dev); 334 mei_nfc_host_exit(dev);
335 335
336 mei_cl_bus_remove_devices(dev);
337
338 mutex_lock(&dev->device_lock); 336 mutex_lock(&dev->device_lock);
339 337
340 mei_wd_stop(dev); 338 mei_wd_stop(dev);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..e9513d651cd3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
682 /* Fill in the data structures */ 682 /* Fill in the data structures */
683 devno = MKDEV(MAJOR(mei_devt), dev->minor); 683 devno = MKDEV(MAJOR(mei_devt), dev->minor);
684 cdev_init(&dev->cdev, &mei_fops); 684 cdev_init(&dev->cdev, &mei_fops);
685 dev->cdev.owner = mei_fops.owner; 685 dev->cdev.owner = parent->driver->owner;
686 686
687 /* Add the device */ 687 /* Add the device */
688 ret = cdev_add(&dev->cdev, devno, 1); 688 ret = cdev_add(&dev->cdev, devno, 1);
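Note: the mei_register() change above makes the character device pin the parent driver's module rather than the fops owner, so the backing module cannot be unloaded while /dev nodes are open. A hedged fragment showing the same pattern (my_dev is a placeholder):

/* Char-device registration that pins the parent driver's module (sketch). */
struct my_dev {
	struct cdev cdev;
};

static int my_register_chardev(struct my_dev *dev, struct device *parent,
			       dev_t devno, const struct file_operations *fops)
{
	cdev_init(&dev->cdev, fops);
	dev->cdev.owner = parent->driver->owner;	/* not fops->owner */
	return cdev_add(&dev->cdev, devno, 1);
}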
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index b983c4ecad38..290ef3037437 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -402,11 +402,12 @@ void mei_nfc_host_exit(struct mei_device *dev)
402 402
403 cldev->priv_data = NULL; 403 cldev->priv_data = NULL;
404 404
405 mutex_lock(&dev->device_lock);
406 /* Need to remove the device here 405 /* Need to remove the device here
407 * since mei_nfc_free will unlink the clients 406 * since mei_nfc_free will unlink the clients
408 */ 407 */
409 mei_cl_remove_device(cldev); 408 mei_cl_remove_device(cldev);
409
410 mutex_lock(&dev->device_lock);
410 mei_nfc_free(ndev); 411 mei_nfc_free(ndev);
411 mutex_unlock(&dev->device_lock); 412 mutex_unlock(&dev->device_lock);
412} 413}
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
357} 357}
358 358
359static struct scatterlist * 359static struct scatterlist *
360scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) 360scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
361{ 361{
362 struct scatterlist *sg; 362 struct scatterlist *sg;
363 struct page *page; 363 struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
368 return NULL; 368 return NULL;
369 sg_init_table(sg, page_cnt); 369 sg_init_table(sg, page_cnt);
370 for (i = 0; i < page_cnt; i++) { 370 for (i = 0; i < page_cnt; i++) {
371 page = vmalloc_to_page((void __force *)va); 371 page = pfn_to_page(pa >> PAGE_SHIFT);
372 if (!page)
373 goto p2p_sg_err;
374 sg_set_page(&sg[i], page, page_size, 0); 372 sg_set_page(&sg[i], page, page_size, 0);
375 va += page_size; 373 pa += page_size;
376 } 374 }
377 return sg; 375 return sg;
378p2p_sg_err:
379 kfree(sg);
380 return NULL;
381} 376}
382 377
383/* Init p2p mappings required to access peerdev from scifdev */ 378/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
395 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); 390 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
396 if (!p2p) 391 if (!p2p)
397 return NULL; 392 return NULL;
398 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, 393 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
399 PAGE_SIZE, num_mmio_pages); 394 PAGE_SIZE, num_mmio_pages);
400 if (!p2p->ppi_sg[SCIF_PPI_MMIO]) 395 if (!p2p->ppi_sg[SCIF_PPI_MMIO])
401 goto free_p2p; 396 goto free_p2p;
402 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; 397 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
403 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); 398 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
404 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); 399 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
405 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, 400 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
406 1 << sg_page_shift, 401 1 << sg_page_shift,
407 num_aper_chunks); 402 num_aper_chunks);
408 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; 403 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
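Note: scif_p2p_setsg() above now builds the scatterlist from a physical address with pfn_to_page() instead of walking vmalloc space. A condensed sketch of that pattern for a contiguous physical range (phys_to_sg is illustrative; validation and error handling are trimmed):

/* Build an SG table over a physically contiguous region (hedged sketch). */
static struct scatterlist *phys_to_sg(phys_addr_t pa, int chunk_size,
				      int chunk_cnt)
{
	struct scatterlist *sg;
	int i;

	sg = kmalloc_array(chunk_cnt, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;
	sg_init_table(sg, chunk_cnt);
	for (i = 0; i < chunk_cnt; i++) {
		sg_set_page(&sg[i], pfn_to_page(pa >> PAGE_SHIFT),
			    chunk_size, 0);
		pa += chunk_size;
	}
	return sg;
}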
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..a1b820fcb2a6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
208 208
209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
210 210
211 mmc_blk_put(md);
212
211 return ret; 213 return ret;
212} 214}
213 215
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..6a0f9c79be26 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
779 779
780config MMC_MTK 780config MMC_MTK
781 tristate "MediaTek SD/MMC Card Interface support" 781 tristate "MediaTek SD/MMC Card Interface support"
782 depends on HAS_DMA
782 help 783 help
783 This selects the MediaTek(R) Secure digital and Multimedia card Interface. 784 This selects the MediaTek(R) Secure digital and Multimedia card Interface.
784 If you have a machine with a integrated SD/MMC card reader, say Y or M here. 785 If you have a machine with a integrated SD/MMC card reader, say Y or M here.
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..4d1203236890 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1062 1062
1063 if (status & (CTO_EN | CCRC_EN)) 1063 if (status & (CTO_EN | CCRC_EN))
1064 end_cmd = 1; 1064 end_cmd = 1;
1065 if (host->data || host->response_busy) {
1066 end_trans = !end_cmd;
1067 host->response_busy = 0;
1068 }
1065 if (status & (CTO_EN | DTO_EN)) 1069 if (status & (CTO_EN | DTO_EN))
1066 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); 1070 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1067 else if (status & (CCRC_EN | DCRC_EN)) 1071 else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
1072 BADA_EN))
1068 hsmmc_command_incomplete(host, -EILSEQ, end_cmd); 1073 hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1069 1074
1070 if (status & ACE_EN) { 1075 if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1081 } 1086 }
1082 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); 1087 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
1083 } 1088 }
1084 if (host->data || host->response_busy) {
1085 end_trans = !end_cmd;
1086 host->response_busy = 0;
1087 }
1088 } 1089 }
1089 1090
1090 OMAP_HSMMC_WRITE(host->base, STAT, status); 1091 OMAP_HSMMC_WRITE(host->base, STAT, status);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..c6b9f6492e1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
582{ 582{
583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
584 struct pltfm_imx_data *imx_data = pltfm_host->priv;
585 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
586 584
587 if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) 585 return pltfm_host->clock;
588 return boarddata->f_max;
589 else
590 return pltfm_host->clock;
591} 586}
592 587
593static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) 588static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
878static int 873static int
879sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 874sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
880 struct sdhci_host *host, 875 struct sdhci_host *host,
881 struct esdhc_platform_data *boarddata) 876 struct pltfm_imx_data *imx_data)
882{ 877{
883 struct device_node *np = pdev->dev.of_node; 878 struct device_node *np = pdev->dev.of_node;
884 879 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
885 if (!np) 880 int ret;
886 return -ENODEV;
887
888 if (of_get_property(np, "non-removable", NULL))
889 boarddata->cd_type = ESDHC_CD_PERMANENT;
890
891 if (of_get_property(np, "fsl,cd-controller", NULL))
892 boarddata->cd_type = ESDHC_CD_CONTROLLER;
893 881
894 if (of_get_property(np, "fsl,wp-controller", NULL)) 882 if (of_get_property(np, "fsl,wp-controller", NULL))
895 boarddata->wp_type = ESDHC_WP_CONTROLLER; 883 boarddata->wp_type = ESDHC_WP_CONTROLLER;
896 884
897 boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
898 if (gpio_is_valid(boarddata->cd_gpio))
899 boarddata->cd_type = ESDHC_CD_GPIO;
900
901 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 885 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
902 if (gpio_is_valid(boarddata->wp_gpio)) 886 if (gpio_is_valid(boarddata->wp_gpio))
903 boarddata->wp_type = ESDHC_WP_GPIO; 887 boarddata->wp_type = ESDHC_WP_GPIO;
904 888
905 of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
906
907 of_property_read_u32(np, "max-frequency", &boarddata->f_max);
908
909 if (of_find_property(np, "no-1-8-v", NULL)) 889 if (of_find_property(np, "no-1-8-v", NULL))
910 boarddata->support_vsel = false; 890 boarddata->support_vsel = false;
911 else 891 else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
916 896
917 mmc_of_parse_voltage(np, &host->ocr_mask); 897 mmc_of_parse_voltage(np, &host->ocr_mask);
918 898
899 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
900 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
901 !IS_ERR(imx_data->pins_default)) {
902 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
903 ESDHC_PINCTRL_STATE_100MHZ);
904 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
905 ESDHC_PINCTRL_STATE_200MHZ);
906 if (IS_ERR(imx_data->pins_100mhz) ||
907 IS_ERR(imx_data->pins_200mhz)) {
908 dev_warn(mmc_dev(host->mmc),
909 "could not get ultra high speed state, work on normal mode\n");
910 /*
911 * fall back to not support uhs by specify no 1.8v quirk
912 */
913 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
914 }
915 } else {
916 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
917 }
918
919 /* call to generic mmc_of_parse to support additional capabilities */ 919 /* call to generic mmc_of_parse to support additional capabilities */
920 return mmc_of_parse(host->mmc); 920 ret = mmc_of_parse(host->mmc);
921 if (ret)
922 return ret;
923
924 if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
925 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
926
927 return 0;
921} 928}
922#else 929#else
923static inline int 930static inline int
924sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 931sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
925 struct sdhci_host *host, 932 struct sdhci_host *host,
926 struct esdhc_platform_data *boarddata) 933 struct pltfm_imx_data *imx_data)
927{ 934{
928 return -ENODEV; 935 return -ENODEV;
929} 936}
930#endif 937#endif
931 938
939static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
940 struct sdhci_host *host,
941 struct pltfm_imx_data *imx_data)
942{
943 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
944 int err;
945
946 if (!host->mmc->parent->platform_data) {
947 dev_err(mmc_dev(host->mmc), "no board data!\n");
948 return -EINVAL;
949 }
950
951 imx_data->boarddata = *((struct esdhc_platform_data *)
952 host->mmc->parent->platform_data);
953 /* write_protect */
954 if (boarddata->wp_type == ESDHC_WP_GPIO) {
955 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
956 if (err) {
957 dev_err(mmc_dev(host->mmc),
958 "failed to request write-protect gpio!\n");
959 return err;
960 }
961 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
962 }
963
964 /* card_detect */
965 switch (boarddata->cd_type) {
966 case ESDHC_CD_GPIO:
967 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
968 if (err) {
969 dev_err(mmc_dev(host->mmc),
970 "failed to request card-detect gpio!\n");
971 return err;
972 }
973 /* fall through */
974
975 case ESDHC_CD_CONTROLLER:
976 /* we have a working card_detect back */
977 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
978 break;
979
980 case ESDHC_CD_PERMANENT:
981 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
982 break;
983
984 case ESDHC_CD_NONE:
985 break;
986 }
987
988 switch (boarddata->max_bus_width) {
989 case 8:
990 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
991 break;
992 case 4:
993 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
994 break;
995 case 1:
996 default:
997 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
998 break;
999 }
1000
1001 return 0;
1002}
1003
932static int sdhci_esdhc_imx_probe(struct platform_device *pdev) 1004static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
933{ 1005{
934 const struct of_device_id *of_id = 1006 const struct of_device_id *of_id =
935 of_match_device(imx_esdhc_dt_ids, &pdev->dev); 1007 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
936 struct sdhci_pltfm_host *pltfm_host; 1008 struct sdhci_pltfm_host *pltfm_host;
937 struct sdhci_host *host; 1009 struct sdhci_host *host;
938 struct esdhc_platform_data *boarddata;
939 int err; 1010 int err;
940 struct pltfm_imx_data *imx_data; 1011 struct pltfm_imx_data *imx_data;
941 bool dt = true;
942 1012
943 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); 1013 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
944 if (IS_ERR(host)) 1014 if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1030 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) 1100 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
1031 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 1101 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1032 1102
1033 boarddata = &imx_data->boarddata; 1103 if (of_id)
1034 if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { 1104 err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
1035 if (!host->mmc->parent->platform_data) { 1105 else
1036 dev_err(mmc_dev(host->mmc), "no board data!\n"); 1106 err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
1037 err = -EINVAL; 1107 if (err)
1038 goto disable_clk; 1108 goto disable_clk;
1039 }
1040 imx_data->boarddata = *((struct esdhc_platform_data *)
1041 host->mmc->parent->platform_data);
1042 dt = false;
1043 }
1044 /* write_protect */
1045 if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
1046 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
1047 if (err) {
1048 dev_err(mmc_dev(host->mmc),
1049 "failed to request write-protect gpio!\n");
1050 goto disable_clk;
1051 }
1052 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1053 }
1054
1055 /* card_detect */
1056 switch (boarddata->cd_type) {
1057 case ESDHC_CD_GPIO:
1058 if (dt)
1059 break;
1060 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
1061 if (err) {
1062 dev_err(mmc_dev(host->mmc),
1063 "failed to request card-detect gpio!\n");
1064 goto disable_clk;
1065 }
1066 /* fall through */
1067
1068 case ESDHC_CD_CONTROLLER:
1069 /* we have a working card_detect back */
1070 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1071 break;
1072
1073 case ESDHC_CD_PERMANENT:
1074 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
1075 break;
1076
1077 case ESDHC_CD_NONE:
1078 break;
1079 }
1080
1081 switch (boarddata->max_bus_width) {
1082 case 8:
1083 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
1084 break;
1085 case 4:
1086 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
1087 break;
1088 case 1:
1089 default:
1090 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
1091 break;
1092 }
1093
1094 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
1095 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
1096 !IS_ERR(imx_data->pins_default)) {
1097 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
1098 ESDHC_PINCTRL_STATE_100MHZ);
1099 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1100 ESDHC_PINCTRL_STATE_200MHZ);
1101 if (IS_ERR(imx_data->pins_100mhz) ||
1102 IS_ERR(imx_data->pins_200mhz)) {
1103 dev_warn(mmc_dev(host->mmc),
1104 "could not get ultra high speed state, work on normal mode\n");
1105 /* fall back to not support uhs by specify no 1.8v quirk */
1106 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1107 }
1108 } else {
1109 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1110 }
1111 1109
1112 err = sdhci_add_host(host); 1110 err = sdhci_add_host(host);
1113 if (err) 1111 if (err)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..a870c42731d7 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -45,6 +45,6 @@
 #define ESDHC_DMA_SYSCTL	0x40c
 #define ESDHC_DMA_SNOOP		0x00000040
 
-#define ESDHC_HOST_CONTROL_RES	0x05
+#define ESDHC_HOST_CONTROL_RES	0x01
 
 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
411 goto err_of_parse; 411 goto err_of_parse;
412 sdhci_get_of_property(pdev); 412 sdhci_get_of_property(pdev);
413 pdata = pxav3_get_mmc_pdata(dev); 413 pdata = pxav3_get_mmc_pdata(dev);
414 pdev->dev.platform_data = pdata;
414 } else if (pdata) { 415 } else if (pdata) {
415 /* on-chip device */ 416 /* on-chip device */
416 if (pdata->flags & PXA_FLAG_CARD_PERMANENT) 417 if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index df088343d60f..255a896769b8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
4 * Support of SDHCI platform devices for spear soc family 4 * Support of SDHCI platform devices for spear soc family
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * Inspired by sdhci-pltfm.c 9 * Inspired by sdhci-pltfm.c
10 * 10 *
@@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = {
211module_platform_driver(sdhci_driver); 211module_platform_driver(sdhci_driver);
212 212
213MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); 213MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
214MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 214MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
215MODULE_LICENSE("GPL v2"); 215MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..1dbe93232030 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
2866 u32 max_current_caps; 2866 u32 max_current_caps;
2867 unsigned int ocr_avail; 2867 unsigned int ocr_avail;
2868 unsigned int override_timeout_clk; 2868 unsigned int override_timeout_clk;
2869 u32 max_clk;
2869 int ret; 2870 int ret;
2870 2871
2871 WARN_ON(host == NULL); 2872 WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
2978 GFP_KERNEL); 2979 GFP_KERNEL);
2979 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); 2980 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
2980 if (!host->adma_table || !host->align_buffer) { 2981 if (!host->adma_table || !host->align_buffer) {
2981 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 2982 if (host->adma_table)
2982 host->adma_table, host->adma_addr); 2983 dma_free_coherent(mmc_dev(mmc),
2984 host->adma_table_sz,
2985 host->adma_table,
2986 host->adma_addr);
2983 kfree(host->align_buffer); 2987 kfree(host->align_buffer);
2984 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2988 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2985 mmc_hostname(mmc)); 2989 mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
3047 * Set host parameters. 3051 * Set host parameters.
3048 */ 3052 */
3049 mmc->ops = &sdhci_ops; 3053 mmc->ops = &sdhci_ops;
3050 mmc->f_max = host->max_clk; 3054 max_clk = host->max_clk;
3055
3051 if (host->ops->get_min_clock) 3056 if (host->ops->get_min_clock)
3052 mmc->f_min = host->ops->get_min_clock(host); 3057 mmc->f_min = host->ops->get_min_clock(host);
3053 else if (host->version >= SDHCI_SPEC_300) { 3058 else if (host->version >= SDHCI_SPEC_300) {
3054 if (host->clk_mul) { 3059 if (host->clk_mul) {
3055 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3060 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3056 mmc->f_max = host->max_clk * host->clk_mul; 3061 max_clk = host->max_clk * host->clk_mul;
3057 } else 3062 } else
3058 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3063 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3059 } else 3064 } else
3060 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3065 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3061 3066
3067 if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
3068 mmc->f_max = max_clk;
3069
3062 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3070 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3063 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> 3071 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3064 SDHCI_TIMEOUT_CLK_SHIFT; 3072 SDHCI_TIMEOUT_CLK_SHIFT;
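Note: the sdhci_add_host() hunk above only honours a platform/DT-supplied mmc->f_max when it is non-zero and below what the controller itself can generate. The clamp reduces to this standalone check:

/* Standalone illustration of the f_max clamp introduced above. */
#include <stdio.h>

static unsigned int pick_f_max(unsigned int board_limit, unsigned int host_max)
{
	if (!board_limit || board_limit > host_max)
		return host_max;	/* no usable board limit: use host max */
	return board_limit;
}

int main(void)
{
	printf("%u\n", pick_f_max(0, 200000000));		/* 200000000 */
	printf("%u\n", pick_f_max(50000000, 200000000));	/* 50000000 */
	printf("%u\n", pick_f_max(400000000, 200000000));	/* 200000000 */
	return 0;
}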
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 19eb990d398c..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); 625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
626} 626}
627 627
628static struct slave *bond_get_old_active(struct bonding *bond,
629 struct slave *new_active)
630{
631 struct slave *slave;
632 struct list_head *iter;
633
634 bond_for_each_slave(bond, slave, iter) {
635 if (slave == new_active)
636 continue;
637
638 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
639 return slave;
640 }
641
642 return NULL;
643}
644
628/* bond_do_fail_over_mac 645/* bond_do_fail_over_mac
629 * 646 *
630 * Perform special MAC address swapping for fail_over_mac settings 647 * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
652 if (!new_active) 669 if (!new_active)
653 return; 670 return;
654 671
672 if (!old_active)
673 old_active = bond_get_old_active(bond, new_active);
674
655 if (old_active) { 675 if (old_active) {
656 ether_addr_copy(tmp_mac, new_active->dev->dev_addr); 676 ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
657 ether_addr_copy(saddr.sa_data, 677 ether_addr_copy(saddr.sa_data,
@@ -689,40 +709,57 @@ out:
689 709
690} 710}
691 711
692static bool bond_should_change_active(struct bonding *bond) 712static struct slave *bond_choose_primary_or_current(struct bonding *bond)
693{ 713{
694 struct slave *prim = rtnl_dereference(bond->primary_slave); 714 struct slave *prim = rtnl_dereference(bond->primary_slave);
695 struct slave *curr = rtnl_dereference(bond->curr_active_slave); 715 struct slave *curr = rtnl_dereference(bond->curr_active_slave);
696 716
697 if (!prim || !curr || curr->link != BOND_LINK_UP) 717 if (!prim || prim->link != BOND_LINK_UP) {
698 return true; 718 if (!curr || curr->link != BOND_LINK_UP)
719 return NULL;
720 return curr;
721 }
722
699 if (bond->force_primary) { 723 if (bond->force_primary) {
700 bond->force_primary = false; 724 bond->force_primary = false;
701 return true; 725 return prim;
726 }
727
728 if (!curr || curr->link != BOND_LINK_UP)
729 return prim;
730
731 /* At this point, prim and curr are both up */
732 switch (bond->params.primary_reselect) {
733 case BOND_PRI_RESELECT_ALWAYS:
734 return prim;
735 case BOND_PRI_RESELECT_BETTER:
736 if (prim->speed < curr->speed)
737 return curr;
738 if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
739 return curr;
740 return prim;
741 case BOND_PRI_RESELECT_FAILURE:
742 return curr;
743 default:
744 netdev_err(bond->dev, "impossible primary_reselect %d\n",
745 bond->params.primary_reselect);
746 return curr;
702 } 747 }
703 if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
704 (prim->speed < curr->speed ||
705 (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
706 return false;
707 if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
708 return false;
709 return true;
710} 748}
711 749
712/** 750/**
713 * find_best_interface - select the best available slave to be the active one 751 * bond_find_best_slave - select the best available slave to be the active one
714 * @bond: our bonding struct 752 * @bond: our bonding struct
715 */ 753 */
716static struct slave *bond_find_best_slave(struct bonding *bond) 754static struct slave *bond_find_best_slave(struct bonding *bond)
717{ 755{
718 struct slave *slave, *bestslave = NULL, *primary; 756 struct slave *slave, *bestslave = NULL;
719 struct list_head *iter; 757 struct list_head *iter;
720 int mintime = bond->params.updelay; 758 int mintime = bond->params.updelay;
721 759
722 primary = rtnl_dereference(bond->primary_slave); 760 slave = bond_choose_primary_or_current(bond);
723 if (primary && primary->link == BOND_LINK_UP && 761 if (slave)
724 bond_should_change_active(bond)) 762 return slave;
725 return primary;
726 763
727 bond_for_each_slave(bond, slave, iter) { 764 bond_for_each_slave(bond, slave, iter) {
728 if (slave->link == BOND_LINK_UP) 765 if (slave->link == BOND_LINK_UP)
@@ -749,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
749 slave ? slave->dev->name : "NULL"); 786 slave ? slave->dev->name : "NULL");
750 787
751 if (!slave || !bond->send_peer_notif || 788 if (!slave || !bond->send_peer_notif ||
789 !netif_carrier_ok(bond->dev) ||
752 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) 790 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
753 return false; 791 return false;
754 792
@@ -1708,9 +1746,16 @@ err_free:
1708 1746
1709err_undo_flags: 1747err_undo_flags:
1710 /* Enslave of first slave has failed and we need to fix master's mac */ 1748 /* Enslave of first slave has failed and we need to fix master's mac */
1711 if (!bond_has_slaves(bond) && 1749 if (!bond_has_slaves(bond)) {
1712 ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) 1750 if (ether_addr_equal_64bits(bond_dev->dev_addr,
1713 eth_hw_addr_random(bond_dev); 1751 slave_dev->dev_addr))
1752 eth_hw_addr_random(bond_dev);
1753 if (bond_dev->type != ARPHRD_ETHER) {
1754 ether_setup(bond_dev);
1755 bond_dev->flags |= IFF_MASTER;
1756 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1757 }
1758 }
1714 1759
1715 return res; 1760 return res;
1716} 1761}
@@ -1899,6 +1944,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1899 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1944 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1900 netdev_info(bond_dev, "Destroying bond %s\n", 1945 netdev_info(bond_dev, "Destroying bond %s\n",
1901 bond_dev->name); 1946 bond_dev->name);
1947 bond_remove_proc_entry(bond);
1902 unregister_netdevice(bond_dev); 1948 unregister_netdevice(bond_dev);
1903 } 1949 }
1904 return ret; 1950 return ret;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
577 577
578 cf->can_id |= CAN_ERR_CRTL; 578 cf->can_id |= CAN_ERR_CRTL;
579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
580 netif_receive_skb(skb);
581 580
582 stats->rx_packets++; 581 stats->rx_packets++;
583 stats->rx_bytes += cf->can_dlc; 582 stats->rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584} 584}
585 585
586/** 586/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
642 } 642 }
643 643
644 at91_read_mb(dev, mb, cf); 644 at91_read_mb(dev, mb, cf);
645 netif_receive_skb(skb);
646 645
647 stats->rx_packets++; 646 stats->rx_packets++;
648 stats->rx_bytes += cf->can_dlc; 647 stats->rx_bytes += cf->can_dlc;
648 netif_receive_skb(skb);
649 649
650 can_led_event(dev, CAN_LED_EVENT_RX); 650 can_led_event(dev, CAN_LED_EVENT_RX);
651} 651}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
802 return 0; 802 return 0;
803 803
804 at91_poll_err_frame(dev, cf, reg_sr); 804 at91_poll_err_frame(dev, cf, reg_sr);
805 netif_receive_skb(skb);
806 805
807 dev->stats.rx_packets++; 806 dev->stats.rx_packets++;
808 dev->stats.rx_bytes += cf->can_dlc; 807 dev->stats.rx_bytes += cf->can_dlc;
808 netif_receive_skb(skb);
809 809
810 return 1; 810 return 1;
811} 811}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
1067 return; 1067 return;
1068 1068
1069 at91_irq_err_state(dev, cf, new_state); 1069 at91_irq_err_state(dev, cf, new_state);
1070 netif_rx(skb);
1071 1070
1072 dev->stats.rx_packets++; 1071 dev->stats.rx_packets++;
1073 dev->stats.rx_bytes += cf->can_dlc; 1072 dev->stats.rx_bytes += cf->can_dlc;
1073 netif_rx(skb);
1074 1074
1075 priv->can.state = new_state; 1075 priv->can.state = new_state;
1076} 1076}
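
The at91 hunks above, and the same change repeated in the other CAN drivers below, all follow one rule: once an skb has been handed to netif_rx()/netif_receive_skb(), the caller no longer owns it, so any field it still needs (here cf->can_dlc for the byte counter) must be read before the hand-off. The self-contained sketch below illustrates that ownership rule with an ordinary heap buffer standing in for the skb; deliver() and struct frame are illustrative names, not kernel APIs.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct frame {
        unsigned char len;
        unsigned char data[8];
    };

    static unsigned long rx_packets, rx_bytes;

    /* Takes ownership of the buffer, like netif_rx(): after this call the
     * caller must not dereference it again. */
    static void deliver(struct frame *f)
    {
        /* ... queue to the upper layer; here we simply drop it ... */
        free(f);
    }

    static void rx_one(const unsigned char *payload, unsigned char len)
    {
        struct frame *f = malloc(sizeof(*f));

        if (!f)
            return;
        f->len = len;
        memcpy(f->data, payload, len);

        /* Account first, while the frame is still ours ... */
        rx_packets++;
        rx_bytes += f->len;
        /* ... then hand it off; reading f->len after this line would be a
         * use-after-free, which is exactly what the driver changes avoid. */
        deliver(f);
    }

    int main(void)
    {
        unsigned char d[4] = { 1, 2, 3, 4 };

        rx_one(d, sizeof(d));
        printf("packets=%lu bytes=%lu\n", rx_packets, rx_bytes);
        return 0;
    }
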
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; 424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
425 } 425 }
426 426
427 netif_rx(skb);
428
429 stats->rx_packets++; 427 stats->rx_packets++;
430 stats->rx_bytes += cf->can_dlc; 428 stats->rx_bytes += cf->can_dlc;
429 netif_rx(skb);
431} 430}
432 431
433static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) 432static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
508 507
509 priv->can.state = state; 508 priv->can.state = state;
510 509
511 netif_rx(skb);
512
513 stats->rx_packets++; 510 stats->rx_packets++;
514 stats->rx_bytes += cf->can_dlc; 511 stats->rx_bytes += cf->can_dlc;
512 netif_rx(skb);
515 513
516 return 0; 514 return 0;
517} 515}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 041525d2595c..5d214d135332 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
592{ 592{
593 struct c_can_priv *priv = netdev_priv(dev); 593 struct c_can_priv *priv = netdev_priv(dev);
594 int err; 594 int err;
595 struct pinctrl *p;
595 596
596 /* basic c_can configuration */ 597 /* basic c_can configuration */
597 err = c_can_chip_config(dev); 598 err = c_can_chip_config(dev);
@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
604 605
605 priv->can.state = CAN_STATE_ERROR_ACTIVE; 606 priv->can.state = CAN_STATE_ERROR_ACTIVE;
606 607
607 /* activate pins */ 608 /* Attempt to use "active" if available else use "default" */
608 pinctrl_pm_select_default_state(dev->dev.parent); 609 p = pinctrl_get_select(priv->device, "active");
610 if (!IS_ERR(p))
611 pinctrl_put(p);
612 else
613 pinctrl_pm_select_default_state(priv->device);
614
609 return 0; 615 return 0;
610} 616}
611 617
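
The c_can hunk tries a device-specific "active" pinctrl state first and only falls back to the default state when that lookup fails. The sketch below reproduces the try-named-then-fallback shape with a plain lookup table; select_state() and the state list are invented for illustration and are not the pinctrl API.

    #include <stdio.h>
    #include <string.h>

    static const char *states[] = { "default", "sleep" };  /* no "active" here */

    /* Return 0 on success, -1 if the named state does not exist. */
    static int select_state(const char *name)
    {
        size_t i;

        for (i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
            if (strcmp(states[i], name) == 0) {
                printf("selected pin state '%s'\n", name);
                return 0;
            }
        }
        return -1;
    }

    int main(void)
    {
        /* Prefer the optional "active" state, else fall back to "default",
         * mirroring pinctrl_get_select("active") vs
         * pinctrl_pm_select_default_state() in the driver. */
        if (select_state("active") != 0)
            select_state("default");
        return 0;
    }
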
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
504 for (i = 0; i < cf->can_dlc; i++) 504 for (i = 0; i < cf->can_dlc; i++)
505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); 505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
506 } 506 }
507 netif_rx(skb);
508 507
509 stats->rx_packets++; 508 stats->rx_packets++;
510 stats->rx_bytes += cf->can_dlc; 509 stats->rx_bytes += cf->can_dlc;
510 netif_rx(skb);
511} 511}
512 512
513static int cc770_err(struct net_device *dev, u8 status) 513static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
584 } 584 }
585 } 585 }
586 586
587 netif_rx(skb);
588 587
589 stats->rx_packets++; 588 stats->rx_packets++;
590 stats->rx_bytes += cf->can_dlc; 589 stats->rx_bytes += cf->can_dlc;
590 netif_rx(skb);
591 591
592 return 0; 592 return 0;
593} 593}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e9b1810d319f..aede704605c6 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
440 struct can_frame *cf = (struct can_frame *)skb->data; 440 struct can_frame *cf = (struct can_frame *)skb->data;
441 u8 dlc = cf->can_dlc; 441 u8 dlc = cf->can_dlc;
442 442
443 if (!(skb->tstamp.tv64))
444 __net_timestamp(skb);
445
446 netif_rx(priv->echo_skb[idx]); 443 netif_rx(priv->echo_skb[idx]);
447 priv->echo_skb[idx] = NULL; 444 priv->echo_skb[idx] = NULL;
448 445
@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
578 if (unlikely(!skb)) 575 if (unlikely(!skb))
579 return NULL; 576 return NULL;
580 577
581 __net_timestamp(skb);
582 skb->protocol = htons(ETH_P_CAN); 578 skb->protocol = htons(ETH_P_CAN);
583 skb->pkt_type = PACKET_BROADCAST; 579 skb->pkt_type = PACKET_BROADCAST;
584 skb->ip_summed = CHECKSUM_UNNECESSARY; 580 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
589 585
590 can_skb_reserve(skb); 586 can_skb_reserve(skb);
591 can_skb_prv(skb)->ifindex = dev->ifindex; 587 can_skb_prv(skb)->ifindex = dev->ifindex;
588 can_skb_prv(skb)->skbcnt = 0;
592 589
593 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame)); 590 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
594 memset(*cf, 0, sizeof(struct can_frame)); 591 memset(*cf, 0, sizeof(struct can_frame));
@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
607 if (unlikely(!skb)) 604 if (unlikely(!skb))
608 return NULL; 605 return NULL;
609 606
610 __net_timestamp(skb);
611 skb->protocol = htons(ETH_P_CANFD); 607 skb->protocol = htons(ETH_P_CANFD);
612 skb->pkt_type = PACKET_BROADCAST; 608 skb->pkt_type = PACKET_BROADCAST;
613 skb->ip_summed = CHECKSUM_UNNECESSARY; 609 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
618 614
619 can_skb_reserve(skb); 615 can_skb_reserve(skb);
620 can_skb_prv(skb)->ifindex = dev->ifindex; 616 can_skb_prv(skb)->ifindex = dev->ifindex;
617 can_skb_prv(skb)->skbcnt = 0;
621 618
622 *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame)); 619 *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
623 memset(*cfd, 0, sizeof(struct canfd_frame)); 620 memset(*cfd, 0, sizeof(struct canfd_frame));
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
577 return 0; 577 return 0;
578 578
579 do_bus_err(dev, cf, reg_esr); 579 do_bus_err(dev, cf, reg_esr);
580 netif_receive_skb(skb);
581 580
582 dev->stats.rx_packets++; 581 dev->stats.rx_packets++;
583 dev->stats.rx_bytes += cf->can_dlc; 582 dev->stats.rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584 584
585 return 1; 585 return 1;
586} 586}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
622 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 622 if (unlikely(new_state == CAN_STATE_BUS_OFF))
623 can_bus_off(dev); 623 can_bus_off(dev);
624 624
625 netif_receive_skb(skb);
626
627 dev->stats.rx_packets++; 625 dev->stats.rx_packets++;
628 dev->stats.rx_bytes += cf->can_dlc; 626 dev->stats.rx_bytes += cf->can_dlc;
627 netif_receive_skb(skb);
629 628
630 return 1; 629 return 1;
631} 630}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
670 } 669 }
671 670
672 flexcan_read_fifo(dev, cf); 671 flexcan_read_fifo(dev, cf);
673 netif_receive_skb(skb);
674 672
675 stats->rx_packets++; 673 stats->rx_packets++;
676 stats->rx_bytes += cf->can_dlc; 674 stats->rx_bytes += cf->can_dlc;
675 netif_receive_skb(skb);
677 676
678 can_led_event(dev, CAN_LED_EVENT_RX); 677 can_led_event(dev, CAN_LED_EVENT_RX);
679 678
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
1216 cf->data[i] = (u8)(slot[j] >> shift); 1216 cf->data[i] = (u8)(slot[j] >> shift);
1217 } 1217 }
1218 } 1218 }
1219 netif_receive_skb(skb);
1220 1219
1221 /* Update statistics and read pointer */ 1220 /* Update statistics and read pointer */
1222 stats->rx_packets++; 1221 stats->rx_packets++;
1223 stats->rx_bytes += cf->can_dlc; 1222 stats->rx_bytes += cf->can_dlc;
1223 netif_receive_skb(skb);
1224
1224 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); 1225 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
1225 } 1226 }
1226 1227
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 7deb80dcbe8c..7bd54191f962 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -508,7 +508,8 @@ static int rcar_can_open(struct net_device *ndev)
508 508
509 err = clk_prepare_enable(priv->clk); 509 err = clk_prepare_enable(priv->clk);
510 if (err) { 510 if (err) {
511 netdev_err(ndev, "failed to enable periperal clock, error %d\n", 511 netdev_err(ndev,
512 "failed to enable peripheral clock, error %d\n",
512 err); 513 err);
513 goto out; 514 goto out;
514 } 515 }
@@ -526,7 +527,8 @@ static int rcar_can_open(struct net_device *ndev)
526 napi_enable(&priv->napi); 527 napi_enable(&priv->napi);
527 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); 528 err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
528 if (err) { 529 if (err) {
529 netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq); 530 netdev_err(ndev, "request_irq(%d) failed, error %d\n",
531 ndev->irq, err);
530 goto out_close; 532 goto out_close;
531 } 533 }
532 can_led_event(ndev, CAN_LED_EVENT_OPEN); 534 can_led_event(ndev, CAN_LED_EVENT_OPEN);
@@ -758,8 +760,9 @@ static int rcar_can_probe(struct platform_device *pdev)
758 } 760 }
759 761
760 irq = platform_get_irq(pdev, 0); 762 irq = platform_get_irq(pdev, 0);
761 if (!irq) { 763 if (irq < 0) {
762 dev_err(&pdev->dev, "No IRQ resource\n"); 764 dev_err(&pdev->dev, "No IRQ resource\n");
765 err = irq;
763 goto fail; 766 goto fail;
764 } 767 }
765 768
@@ -782,7 +785,8 @@ static int rcar_can_probe(struct platform_device *pdev)
782 priv->clk = devm_clk_get(&pdev->dev, "clkp1"); 785 priv->clk = devm_clk_get(&pdev->dev, "clkp1");
783 if (IS_ERR(priv->clk)) { 786 if (IS_ERR(priv->clk)) {
784 err = PTR_ERR(priv->clk); 787 err = PTR_ERR(priv->clk);
785 dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err); 788 dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
789 err);
786 goto fail_clk; 790 goto fail_clk;
787 } 791 }
788 792
@@ -794,7 +798,7 @@ static int rcar_can_probe(struct platform_device *pdev)
794 priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); 798 priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]);
795 if (IS_ERR(priv->can_clk)) { 799 if (IS_ERR(priv->can_clk)) {
796 err = PTR_ERR(priv->can_clk); 800 err = PTR_ERR(priv->can_clk);
797 dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err); 801 dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err);
798 goto fail_clk; 802 goto fail_clk;
799 } 803 }
800 804
@@ -823,7 +827,7 @@ static int rcar_can_probe(struct platform_device *pdev)
823 827
824 devm_can_led_init(ndev); 828 devm_can_led_init(ndev);
825 829
826 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n", 830 dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
827 priv->regs, ndev->irq); 831 priv->regs, ndev->irq);
828 832
829 return 0; 833 return 0;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
377 /* release receive buffer */ 377 /* release receive buffer */
378 sja1000_write_cmdreg(priv, CMD_RRB); 378 sja1000_write_cmdreg(priv, CMD_RRB);
379 379
380 netif_rx(skb);
381
382 stats->rx_packets++; 380 stats->rx_packets++;
383 stats->rx_bytes += cf->can_dlc; 381 stats->rx_bytes += cf->can_dlc;
382 netif_rx(skb);
384 383
385 can_led_event(dev, CAN_LED_EVENT_RX); 384 can_led_event(dev, CAN_LED_EVENT_RX);
386} 385}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
484 can_bus_off(dev); 483 can_bus_off(dev);
485 } 484 }
486 485
487 netif_rx(skb);
488
489 stats->rx_packets++; 486 stats->rx_packets++;
490 stats->rx_bytes += cf->can_dlc; 487 stats->rx_bytes += cf->can_dlc;
488 netif_rx(skb);
491 489
492 return 0; 490 return 0;
493} 491}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f64f5290d6f8..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
207 if (!skb) 207 if (!skb)
208 return; 208 return;
209 209
210 __net_timestamp(skb);
211 skb->dev = sl->dev; 210 skb->dev = sl->dev;
212 skb->protocol = htons(ETH_P_CAN); 211 skb->protocol = htons(ETH_P_CAN);
213 skb->pkt_type = PACKET_BROADCAST; 212 skb->pkt_type = PACKET_BROADCAST;
@@ -215,13 +214,14 @@ static void slc_bump(struct slcan *sl)
215 214
216 can_skb_reserve(skb); 215 can_skb_reserve(skb);
217 can_skb_prv(skb)->ifindex = sl->dev->ifindex; 216 can_skb_prv(skb)->ifindex = sl->dev->ifindex;
217 can_skb_prv(skb)->skbcnt = 0;
218 218
219 memcpy(skb_put(skb, sizeof(struct can_frame)), 219 memcpy(skb_put(skb, sizeof(struct can_frame)),
220 &cf, sizeof(struct can_frame)); 220 &cf, sizeof(struct can_frame));
221 netif_rx_ni(skb);
222 221
223 sl->dev->stats.rx_packets++; 222 sl->dev->stats.rx_packets++;
224 sl->dev->stats.rx_bytes += cf.can_dlc; 223 sl->dev->stats.rx_bytes += cf.can_dlc;
224 netif_rx_ni(skb);
225} 225}
226 226
227/* parse tty input stream */ 227/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
1086 if (ret) 1086 if (ret)
1087 goto out_clk; 1087 goto out_clk;
1088 1088
1089 priv->power = devm_regulator_get(&spi->dev, "vdd"); 1089 priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
1090 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1090 priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { 1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
1093 ret = -EPROBE_DEFER; 1093 ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
1222 struct spi_device *spi = to_spi_device(dev); 1222 struct spi_device *spi = to_spi_device(dev);
1223 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1223 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1224 1224
1225 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1225 if (priv->after_suspend & AFTER_SUSPEND_POWER)
1226 mcp251x_power_enable(priv->power, 1); 1226 mcp251x_power_enable(priv->power, 1);
1227
1228 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1229 mcp251x_power_enable(priv->transceiver, 1);
1227 queue_work(priv->wq, &priv->restart_work); 1230 queue_work(priv->wq, &priv->restart_work);
1228 } else { 1231 } else {
1229 if (priv->after_suspend & AFTER_SUSPEND_UP) { 1232 priv->after_suspend = 0;
1230 mcp251x_power_enable(priv->transceiver, 1);
1231 queue_work(priv->wq, &priv->restart_work);
1232 } else {
1233 priv->after_suspend = 0;
1234 }
1235 } 1233 }
1234
1236 priv->force_quit = 0; 1235 priv->force_quit = 0;
1237 enable_irq(spi->irq); 1236 enable_irq(spi->irq);
1238 return 0; 1237 return 0;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
747 } 747 }
748 } 748 }
749 749
750 netif_rx(skb);
751 stats->rx_packets++; 750 stats->rx_packets++;
752 stats->rx_bytes += cf->can_dlc; 751 stats->rx_bytes += cf->can_dlc;
752 netif_rx(skb);
753 753
754 return 0; 754 return 0;
755} 755}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
324 cf->data[i] = msg->msg.can_msg.msg[i]; 324 cf->data[i] = msg->msg.can_msg.msg[i];
325 } 325 }
326 326
327 netif_rx(skb);
328
329 stats->rx_packets++; 327 stats->rx_packets++;
330 stats->rx_bytes += cf->can_dlc; 328 stats->rx_bytes += cf->can_dlc;
329 netif_rx(skb);
331} 330}
332 331
333static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) 332static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
400 stats->rx_errors++; 399 stats->rx_errors++;
401 } 400 }
402 401
403 netif_rx(skb);
404
405 stats->rx_packets++; 402 stats->rx_packets++;
406 stats->rx_bytes += cf->can_dlc; 403 stats->rx_bytes += cf->can_dlc;
404 netif_rx(skb);
407} 405}
408 406
409/* 407/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
301 cf->data[7] = rxerr; 301 cf->data[7] = rxerr;
302 } 302 }
303 303
304 netif_rx(skb);
305
306 priv->bec.txerr = txerr; 304 priv->bec.txerr = txerr;
307 priv->bec.rxerr = rxerr; 305 priv->bec.rxerr = rxerr;
308 306
309 stats->rx_packets++; 307 stats->rx_packets++;
310 stats->rx_bytes += cf->can_dlc; 308 stats->rx_bytes += cf->can_dlc;
309 netif_rx(skb);
311 } 310 }
312} 311}
313 312
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
347 cf->data[i] = msg->msg.rx.data[i]; 346 cf->data[i] = msg->msg.rx.data[i];
348 } 347 }
349 348
350 netif_rx(skb);
351
352 stats->rx_packets++; 349 stats->rx_packets++;
353 stats->rx_bytes += cf->can_dlc; 350 stats->rx_bytes += cf->can_dlc;
351 netif_rx(skb);
354 } 352 }
355 353
356 return; 354 return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
526 hwts->hwtstamp = timeval_to_ktime(tv); 526 hwts->hwtstamp = timeval_to_ktime(tv);
527 } 527 }
528 528
529 netif_rx(skb);
530 mc->netdev->stats.rx_packets++; 529 mc->netdev->stats.rx_packets++;
531 mc->netdev->stats.rx_bytes += cf->can_dlc; 530 mc->netdev->stats.rx_bytes += cf->can_dlc;
531 netif_rx(skb);
532 532
533 return 0; 533 return 0;
534} 534}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
659 hwts = skb_hwtstamps(skb); 659 hwts = skb_hwtstamps(skb);
660 hwts->hwtstamp = timeval_to_ktime(tv); 660 hwts->hwtstamp = timeval_to_ktime(tv);
661 661
662 /* push the skb */
663 netif_rx(skb);
664
665 /* update statistics */ 662 /* update statistics */
666 mc->netdev->stats.rx_packets++; 663 mc->netdev->stats.rx_packets++;
667 mc->netdev->stats.rx_bytes += cf->can_dlc; 664 mc->netdev->stats.rx_bytes += cf->can_dlc;
665 /* push the skb */
666 netif_rx(skb);
668 667
669 return 0; 668 return 0;
670 669
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
553 hwts = skb_hwtstamps(skb); 553 hwts = skb_hwtstamps(skb);
554 hwts->hwtstamp = timeval_to_ktime(tv); 554 hwts->hwtstamp = timeval_to_ktime(tv);
555 555
556 netif_rx(skb);
557 netdev->stats.rx_packets++; 556 netdev->stats.rx_packets++;
558 netdev->stats.rx_bytes += can_frame->can_dlc; 557 netdev->stats.rx_bytes += can_frame->can_dlc;
558 netif_rx(skb);
559 559
560 return 0; 560 return 0;
561} 561}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); 670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
671 hwts = skb_hwtstamps(skb); 671 hwts = skb_hwtstamps(skb);
672 hwts->hwtstamp = timeval_to_ktime(tv); 672 hwts->hwtstamp = timeval_to_ktime(tv);
673 netif_rx(skb);
674 netdev->stats.rx_packets++; 673 netdev->stats.rx_packets++;
675 netdev->stats.rx_bytes += can_frame->can_dlc; 674 netdev->stats.rx_bytes += can_frame->can_dlc;
675 netif_rx(skb);
676 676
677 return 0; 677 return 0;
678} 678}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
461 priv->bec.txerr = txerr; 461 priv->bec.txerr = txerr;
462 priv->bec.rxerr = rxerr; 462 priv->bec.rxerr = rxerr;
463 463
464 netif_rx(skb);
465
466 stats->rx_packets++; 464 stats->rx_packets++;
467 stats->rx_bytes += cf->can_dlc; 465 stats->rx_bytes += cf->can_dlc;
466 netif_rx(skb);
468} 467}
469 468
470/* Read data and status frames */ 469/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
494 else 493 else
495 memcpy(cf->data, msg->data, cf->can_dlc); 494 memcpy(cf->data, msg->data, cf->can_dlc);
496 495
497 netif_rx(skb);
498
499 stats->rx_packets++; 496 stats->rx_packets++;
500 stats->rx_bytes += cf->can_dlc; 497 stats->rx_bytes += cf->can_dlc;
498 netif_rx(skb);
501 499
502 can_led_event(priv->netdev, CAN_LED_EVENT_RX); 500 can_led_event(priv->netdev, CAN_LED_EVENT_RX);
503 } else { 501 } else {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0ce868de855d..674f367087c5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
78 skb->dev = dev; 78 skb->dev = dev;
79 skb->ip_summed = CHECKSUM_UNNECESSARY; 79 skb->ip_summed = CHECKSUM_UNNECESSARY;
80 80
81 if (!(skb->tstamp.tv64))
82 __net_timestamp(skb);
83
84 netif_rx_ni(skb); 81 netif_rx_ni(skb);
85} 82}
86 83
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
696 } 696 }
697 697
698 /* Include the pseudo-PHY address and the broadcast PHY address to 698 /* Include the pseudo-PHY address and the broadcast PHY address to
699 * divert reads towards our workaround 699 * divert reads towards our workaround. This is only required for
700 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
701 * that we can use the regular SWITCH_MDIO master controller instead.
702 *
703 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
704 * to have a 1:1 mapping between Port address and PHY address in order
705 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
 706 * not what we want here, so we initialize phys_mii_mask to 0 to always
707 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
700 */ 708 */
701 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 709 if (of_machine_is_compatible("brcm,bcm7445d0"))
710 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
711 else
712 ds->phys_mii_mask = 0;
702 713
703 rev = reg_readl(priv, REG_SWITCH_REVISION); 714 rev = reg_readl(priv, REG_SWITCH_REVISION);
704 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & 715 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
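
The bcm_sf2 change scopes the pseudo-PHY read diversion to 7445D0 only, keyed off the machine compatible string, and otherwise clears phys_mii_mask so the regular MDIO master is used. The fragment below mimics that selection with a plain string compare; machine_is() and the constants are invented for the example.

    #include <stdio.h>
    #include <string.h>

    #define PSEUDO_PHY_ADDR 30

    static int machine_is(const char *machine, const char *compat)
    {
        return strcmp(machine, compat) == 0;
    }

    int main(void)
    {
        const char *machine = "brcm,bcm7445d0";   /* would come from the device tree */
        unsigned int phys_mii_mask;

        /* Divert pseudo-PHY and broadcast-PHY reads only on the revision that
         * needs the workaround; everywhere else use the master MDIO bus. */
        if (machine_is(machine, "brcm,bcm7445d0"))
            phys_mii_mask = (1u << PSEUDO_PHY_ADDR) | (1u << 0);
        else
            phys_mii_mask = 0;

        printf("phys_mii_mask = 0x%08x\n", phys_mii_mask);
        return 0;
    }
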
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1163 1163
1164 newfid = __ffs(ps->fid_mask); 1164 newfid = __ffs(ps->fid_mask);
1165 ps->fid[port] = newfid; 1165 ps->fid[port] = newfid;
1166 ps->fid_mask &= (1 << newfid); 1166 ps->fid_mask &= ~(1 << newfid);
1167 ps->bridge_mask[fid] &= ~(1 << port); 1167 ps->bridge_mask[fid] &= ~(1 << port);
1168 ps->bridge_mask[newfid] = 1 << port; 1168 ps->bridge_mask[newfid] = 1 << port;
1169 1169
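
The mv88e6xxx change is a one-character fix: the FID just taken from the free mask must be cleared with &= ~(1 << newfid); the old &= (1 << newfid) instead wiped out every other free FID. A minimal demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned int free_mask = 0xff;   /* FIDs 0-7 free */
        unsigned int newfid = 3;

        unsigned int wrong = free_mask & (1u << newfid);   /* keeps only bit 3 */
        unsigned int right = free_mask & ~(1u << newfid);  /* clears bit 3 only */

        printf("wrong: 0x%02x (all other FIDs lost)\n", wrong);  /* 0x08 */
        printf("right: 0x%02x (FID 3 marked in use)\n", right);  /* 0xf7 */
        return 0;
    }
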
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 41095ebad97f..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1764 } 1764 }
1765 if (i != RX_RING_SIZE) { 1765 if (i != RX_RING_SIZE) {
1766 int j;
1767 pr_emerg("%s: no memory for rx ring\n", dev->name); 1766 pr_emerg("%s: no memory for rx ring\n", dev->name);
1768 for (j = 0; j < i; j++) {
1769 if (vp->rx_skbuff[j]) {
1770 dev_kfree_skb(vp->rx_skbuff[j]);
1771 vp->rx_skbuff[j] = NULL;
1772 }
1773 }
1774 retval = -ENOMEM; 1767 retval = -ENOMEM;
1775 goto err_free_irq; 1768 goto err_free_skb;
1776 } 1769 }
1777 /* Wrap the ring. */ 1770 /* Wrap the ring. */
1778 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); 1771 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
1782 if (!retval) 1775 if (!retval)
1783 goto out; 1776 goto out;
1784 1777
1785err_free_irq: 1778err_free_skb:
1779 for (i = 0; i < RX_RING_SIZE; i++) {
1780 if (vp->rx_skbuff[i]) {
1781 dev_kfree_skb(vp->rx_skbuff[i]);
1782 vp->rx_skbuff[i] = NULL;
1783 }
1784 }
1786 free_irq(dev->irq, dev); 1785 free_irq(dev->irq, dev);
1787err: 1786err:
1788 if (vortex_debug > 1) 1787 if (vortex_debug > 1)
@@ -2382,6 +2381,7 @@ boomerang_interrupt(int irq, void *dev_id)
2382 void __iomem *ioaddr; 2381 void __iomem *ioaddr;
2383 int status; 2382 int status;
2384 int work_done = max_interrupt_work; 2383 int work_done = max_interrupt_work;
2384 int handled = 0;
2385 2385
2386 ioaddr = vp->ioaddr; 2386 ioaddr = vp->ioaddr;
2387 2387
@@ -2400,6 +2400,7 @@ boomerang_interrupt(int irq, void *dev_id)
2400 2400
2401 if ((status & IntLatch) == 0) 2401 if ((status & IntLatch) == 0)
2402 goto handler_exit; /* No interrupt: shared IRQs can cause this */ 2402 goto handler_exit; /* No interrupt: shared IRQs can cause this */
2403 handled = 1;
2403 2404
2404 if (status == 0xffff) { /* h/w no longer present (hotplug)? */ 2405 if (status == 0xffff) { /* h/w no longer present (hotplug)? */
2405 if (vortex_debug > 1) 2406 if (vortex_debug > 1)
@@ -2501,7 +2502,7 @@ boomerang_interrupt(int irq, void *dev_id)
2501handler_exit: 2502handler_exit:
2502 vp->handling_irq = 0; 2503 vp->handling_irq = 0;
2503 spin_unlock(&vp->lock); 2504 spin_unlock(&vp->lock);
2504 return IRQ_HANDLED; 2505 return IRQ_RETVAL(handled);
2505} 2506}
2506 2507
2507static int vortex_rx(struct net_device *dev) 2508static int vortex_rx(struct net_device *dev)
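
Two things happen in the 3c59x hunks: the rx-ring error path now frees every allocated skb through a single err_free_skb label, and boomerang_interrupt() returns IRQ_RETVAL(handled) instead of unconditionally claiming the interrupt, which matters on shared IRQ lines. The sketch below shows the second idea with plain integers; read_status() and the NONE/HANDLED constants are stand-ins, not the kernel's irqreturn API.

    #include <stdio.h>

    #define IRQ_NONE    0
    #define IRQ_HANDLED 1

    /* Pretend status register: bit 0 set means "this device raised the IRQ". */
    static unsigned int read_status(int raised_by_us)
    {
        return raised_by_us ? 0x1 : 0x0;
    }

    /* A handler on a shared line should only claim interrupts it actually
     * serviced, otherwise the core cannot detect spurious or stuck IRQs. */
    static int handler(int raised_by_us)
    {
        int handled = 0;

        if (read_status(raised_by_us) & 0x1) {
            handled = 1;
            /* ... service the device here ... */
        }
        return handled ? IRQ_HANDLED : IRQ_NONE;
    }

    int main(void)
    {
        printf("ours:     %d\n", handler(1));  /* IRQ_HANDLED */
        printf("not ours: %d\n", handler(0));  /* IRQ_NONE */
        return 0;
    }
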
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 661cdaa7ea96..b3bc87fe3764 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -303,7 +303,8 @@ static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
303 get_page(pa->pages); 303 get_page(pa->pages);
304 bd->pa = *pa; 304 bd->pa = *pa;
305 305
306 bd->dma = pa->pages_dma + pa->pages_offset; 306 bd->dma_base = pa->pages_dma;
307 bd->dma_off = pa->pages_offset;
307 bd->dma_len = len; 308 bd->dma_len = len;
308 309
309 pa->pages_offset += len; 310 pa->pages_offset += len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 506e832c9e9a..a4473d8ff4fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1110,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1110 unsigned int rx_usecs = pdata->rx_usecs; 1110 unsigned int rx_usecs = pdata->rx_usecs;
1111 unsigned int rx_frames = pdata->rx_frames; 1111 unsigned int rx_frames = pdata->rx_frames;
1112 unsigned int inte; 1112 unsigned int inte;
1113 dma_addr_t hdr_dma, buf_dma;
1113 1114
1114 if (!rx_usecs && !rx_frames) { 1115 if (!rx_usecs && !rx_frames) {
1115 /* No coalescing, interrupt for every descriptor */ 1116 /* No coalescing, interrupt for every descriptor */
@@ -1129,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
1129 * Set buffer 2 (hi) address to buffer dma address (hi) and 1130 * Set buffer 2 (hi) address to buffer dma address (hi) and
1130 * set control bits OWN and INTE 1131 * set control bits OWN and INTE
1131 */ 1132 */
1132 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma)); 1133 hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
1133 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma)); 1134 buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
1134 rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma)); 1135 rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
1135 rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma)); 1136 rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
1137 rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
1138 rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
1136 1139
1137 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); 1140 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
1138 1141
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 1e9c28d19ef8..aae9d5ecd182 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1765,8 +1765,9 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1765 /* Start with the header buffer which may contain just the header 1765 /* Start with the header buffer which may contain just the header
1766 * or the header plus data 1766 * or the header plus data
1767 */ 1767 */
1768 dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma, 1768 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
1769 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE); 1769 rdata->rx.hdr.dma_off,
1770 rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
1770 1771
1771 packet = page_address(rdata->rx.hdr.pa.pages) + 1772 packet = page_address(rdata->rx.hdr.pa.pages) +
1772 rdata->rx.hdr.pa.pages_offset; 1773 rdata->rx.hdr.pa.pages_offset;
@@ -1778,8 +1779,11 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1778 len -= copy_len; 1779 len -= copy_len;
1779 if (len) { 1780 if (len) {
1780 /* Add the remaining data as a frag */ 1781 /* Add the remaining data as a frag */
1781 dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma, 1782 dma_sync_single_range_for_cpu(pdata->dev,
1782 rdata->rx.buf.dma_len, DMA_FROM_DEVICE); 1783 rdata->rx.buf.dma_base,
1784 rdata->rx.buf.dma_off,
1785 rdata->rx.buf.dma_len,
1786 DMA_FROM_DEVICE);
1783 1787
1784 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 1788 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1785 rdata->rx.buf.pa.pages, 1789 rdata->rx.buf.pa.pages,
@@ -1945,8 +1949,9 @@ read_again:
1945 if (!skb) 1949 if (!skb)
1946 error = 1; 1950 error = 1;
1947 } else if (rdesc_len) { 1951 } else if (rdesc_len) {
1948 dma_sync_single_for_cpu(pdata->dev, 1952 dma_sync_single_range_for_cpu(pdata->dev,
1949 rdata->rx.buf.dma, 1953 rdata->rx.buf.dma_base,
1954 rdata->rx.buf.dma_off,
1950 rdata->rx.buf.dma_len, 1955 rdata->rx.buf.dma_len,
1951 DMA_FROM_DEVICE); 1956 DMA_FROM_DEVICE);
1952 1957
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 63d72a140053..717ce21b6077 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -337,7 +337,8 @@ struct xgbe_buffer_data {
337 struct xgbe_page_alloc pa; 337 struct xgbe_page_alloc pa;
338 struct xgbe_page_alloc pa_unmap; 338 struct xgbe_page_alloc pa_unmap;
339 339
340 dma_addr_t dma; 340 dma_addr_t dma_base;
341 unsigned long dma_off;
341 unsigned int dma_len; 342 unsigned int dma_len;
342}; 343};
343 344
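
The xgbe patches stop storing a single pre-summed DMA address and keep the mapping base and the offset separately, because dma_sync_single_range_for_cpu() wants exactly those two pieces (base of the original mapping, offset into it, and length). The sketch below shows only the bookkeeping side of that split with ordinary integers; the buffer_data struct and sync_range() are illustrative, not the DMA API.

    #include <stdio.h>
    #include <stdint.h>

    struct buffer_data {
        uint64_t dma_base;  /* address returned when the page block was mapped */
        uint64_t dma_off;   /* where this buffer starts inside that mapping */
        uint32_t dma_len;
    };

    /* Stand-in for dma_sync_single_range_for_cpu(dev, base, off, len, dir):
     * the point is that it is keyed by the original mapping, not by an
     * arbitrary address in the middle of it. */
    static void sync_range(uint64_t base, uint64_t off, uint32_t len)
    {
        printf("sync mapping 0x%llx, offset %llu, %u bytes\n",
               (unsigned long long)base, (unsigned long long)off, len);
    }

    int main(void)
    {
        struct buffer_data bd = { .dma_base = 0x80000000ull,
                                  .dma_off  = 256,
                                  .dma_len  = 1536 };

        /* The descriptor still needs the absolute device address ... */
        uint64_t dev_addr = bd.dma_base + bd.dma_off;

        /* ... but the CPU-side sync is expressed as base + offset. */
        sync_range(bd.dma_base, bd.dma_off, bd.dma_len);
        printf("descriptor address 0x%llx\n", (unsigned long long)dev_addr);
        return 0;
    }
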
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 909ad7a0d480..4566cdf0bc39 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1793,7 +1793,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1793 macaddr = of_get_mac_address(dn); 1793 macaddr = of_get_mac_address(dn);
1794 if (!macaddr || !is_valid_ether_addr(macaddr)) { 1794 if (!macaddr || !is_valid_ether_addr(macaddr)) {
1795 dev_warn(&pdev->dev, "using random Ethernet MAC\n"); 1795 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
1796 random_ether_addr(dev->dev_addr); 1796 eth_hw_addr_random(dev);
1797 } else { 1797 } else {
1798 ether_addr_copy(dev->dev_addr, macaddr); 1798 ether_addr_copy(dev->dev_addr, macaddr);
1799 } 1799 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
262 if (likely(skb)) { 262 if (likely(skb)) {
263 (*pkts_compl)++; 263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len; 264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
265 } 266 }
266 267
267 dev_kfree_skb_any(skb);
268 tx_buf->first_bd = 0; 268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL; 269 tx_buf->skb = NULL;
270 270
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1718 offset += sizeof(u32); 1718 offset += sizeof(u32);
1719 data_buf += sizeof(u32); 1719 data_buf += sizeof(u32);
1720 written_so_far += sizeof(u32); 1720 written_so_far += sizeof(u32);
1721
1722 /* At end of each 4Kb page, release nvram lock to allow MFW
1723 * chance to take it for its own use.
1724 */
1725 if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
1726 (written_so_far < buf_size)) {
1727 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1728 "Releasing NVM lock after offset 0x%x\n",
1729 (u32)(offset - sizeof(u32)));
1730 bnx2x_release_nvram_lock(bp);
1731 usleep_range(1000, 2000);
1732 rc = bnx2x_acquire_nvram_lock(bp);
1733 if (rc)
1734 return rc;
1735 }
1736
1721 cmd_flags = 0; 1737 cmd_flags = 0;
1722 } 1738 }
1723 1739
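
The bnx2x hunk breaks a long NVRAM write into 4KB pages and drops the hardware lock between pages so the management firmware gets a chance to take it. The standalone loop below keeps only that shape; acquire_lock()/release_lock() and the PAGE size are placeholders for the real bnx2x_acquire_nvram_lock()/bnx2x_release_nvram_lock() calls, not the driver's API.

    #include <stdio.h>

    #define PAGE  4096u
    #define CHUNK 4u    /* bytes written per inner step, like sizeof(u32) */

    static int acquire_lock(void) { printf("lock acquired\n"); return 0; }
    static void release_lock(void) { printf("lock released\n"); }

    static int write_all(unsigned int total)
    {
        unsigned int written = 0;

        if (acquire_lock())
            return -1;

        while (written < total) {
            /* ... program CHUNK bytes of flash here ... */
            written += CHUNK;

            /* At each page boundary, briefly give the lock back so another
             * agent (the management firmware in bnx2x's case) can use it. */
            if ((written % PAGE) == 0 && written < total) {
                release_lock();
                if (acquire_lock())
                    return -1;
            }
        }

        release_lock();
        return 0;
    }

    int main(void)
    {
        return write_all(2 * PAGE);
    }
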
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b43b2cb9b830..64c1e9db6b0b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1230,7 +1230,6 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1230 new_skb = skb_realloc_headroom(skb, sizeof(*status)); 1230 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1231 dev_kfree_skb(skb); 1231 dev_kfree_skb(skb);
1232 if (!new_skb) { 1232 if (!new_skb) {
1233 dev->stats.tx_errors++;
1234 dev->stats.tx_dropped++; 1233 dev->stats.tx_dropped++;
1235 return NULL; 1234 return NULL;
1236 } 1235 }
@@ -1465,7 +1464,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1465 1464
1466 if (unlikely(!skb)) { 1465 if (unlikely(!skb)) {
1467 dev->stats.rx_dropped++; 1466 dev->stats.rx_dropped++;
1468 dev->stats.rx_errors++;
1469 goto next; 1467 goto next;
1470 } 1468 }
1471 1469
@@ -1493,7 +1491,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1493 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1491 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1494 netif_err(priv, rx_status, dev, 1492 netif_err(priv, rx_status, dev,
1495 "dropping fragmented packet!\n"); 1493 "dropping fragmented packet!\n");
1496 dev->stats.rx_dropped++;
1497 dev->stats.rx_errors++; 1494 dev->stats.rx_errors++;
1498 dev_kfree_skb_any(skb); 1495 dev_kfree_skb_any(skb);
1499 goto next; 1496 goto next;
@@ -1515,7 +1512,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
1515 dev->stats.rx_frame_errors++; 1512 dev->stats.rx_frame_errors++;
1516 if (dma_flag & DMA_RX_LG) 1513 if (dma_flag & DMA_RX_LG)
1517 dev->stats.rx_length_errors++; 1514 dev->stats.rx_length_errors++;
1518 dev->stats.rx_dropped++;
1519 dev->stats.rx_errors++; 1515 dev->stats.rx_errors++;
1520 dev_kfree_skb_any(skb); 1516 dev_kfree_skb_any(skb);
1521 goto next; 1517 goto next;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ac27e24264a5..f557a2aaec23 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1508 __raw_writeq(reg, port); 1508 __raw_writeq(reg, port);
1509 port = s->sbm_base + R_MAC_ETHERNET_ADDR; 1509 port = s->sbm_base + R_MAC_ETHERNET_ADDR;
1510 1510
1511#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
1512 /*
1513 * Pass1 SOCs do not receive packets addressed to the
1514 * destination address in the R_MAC_ETHERNET_ADDR register.
1515 * Set the value to zero.
1516 */
1517 __raw_writeq(0, port);
1518#else
1519 __raw_writeq(reg, port); 1511 __raw_writeq(reg, port);
1520#endif
1521 1512
1522 /* 1513 /*
1523 * Set the receive filter for no packets, and write values 1514 * Set the receive filter for no packets, and write values
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
676 if (!next_cmpl->valid) 676 if (!next_cmpl->valid)
677 break; 677 break;
678 } 678 }
679 packets++;
679 680
680 /* TODO: BNA_CQ_EF_LOCAL ? */ 681 /* TODO: BNA_CQ_EF_LOCAL ? */
681 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | 682 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
692 else 693 else
693 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); 694 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
694 695
695 packets++;
696 rcb->rxq->rx_packets++; 696 rcb->rxq->rx_packets++;
697 rcb->rxq->rx_bytes += totlen; 697 rcb->rxq->rx_bytes += totlen;
698 ccb->bytes_per_intr += totlen; 698 ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); 104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
105} 105}
106 106
107/* I/O accessors */
108static u32 hw_readl_native(struct macb *bp, int offset)
109{
110 return __raw_readl(bp->regs + offset);
111}
112
113static void hw_writel_native(struct macb *bp, int offset, u32 value)
114{
115 __raw_writel(value, bp->regs + offset);
116}
117
118static u32 hw_readl(struct macb *bp, int offset)
119{
120 return readl_relaxed(bp->regs + offset);
121}
122
123static void hw_writel(struct macb *bp, int offset, u32 value)
124{
125 writel_relaxed(value, bp->regs + offset);
126}
127
128/*
129 * Find the CPU endianness by using the loopback bit of NCR register. When the
 130 * CPU is in big endian we need to program swapped mode for management
131 * descriptor access.
132 */
133static bool hw_is_native_io(void __iomem *addr)
134{
135 u32 value = MACB_BIT(LLB);
136
137 __raw_writel(value, addr + MACB_NCR);
138 value = __raw_readl(addr + MACB_NCR);
139
140 /* Write 0 back to disable everything */
141 __raw_writel(0, addr + MACB_NCR);
142
143 return value == MACB_BIT(LLB);
144}
145
146static bool hw_is_gem(void __iomem *addr, bool native_io)
147{
148 u32 id;
149
150 if (native_io)
151 id = __raw_readl(addr + MACB_MID);
152 else
153 id = readl_relaxed(addr + MACB_MID);
154
155 return MACB_BFEXT(IDNUM, id) >= 0x2;
156}
157
107static void macb_set_hwaddr(struct macb *bp) 158static void macb_set_hwaddr(struct macb *bp)
108{ 159{
109 u32 bottom; 160 u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
160 } 211 }
161 } 212 }
162 213
163 netdev_info(bp->dev, "invalid hw address, using random\n"); 214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
164 eth_hw_addr_random(bp->dev); 215 eth_hw_addr_random(bp->dev);
165} 216}
166 217
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
252 struct macb *bp = netdev_priv(dev); 303 struct macb *bp = netdev_priv(dev);
253 struct phy_device *phydev = bp->phy_dev; 304 struct phy_device *phydev = bp->phy_dev;
254 unsigned long flags; 305 unsigned long flags;
255
256 int status_change = 0; 306 int status_change = 0;
257 307
258 spin_lock_irqsave(&bp->lock, flags); 308 spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
449 499
450static void macb_update_stats(struct macb *bp) 500static void macb_update_stats(struct macb *bp)
451{ 501{
452 u32 __iomem *reg = bp->regs + MACB_PFR;
453 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 502 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
454 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
504 int offset = MACB_PFR;
455 505
456 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 506 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
457 507
458 for(; p < end; p++, reg++) 508 for(; p < end; p++, offset += 4)
459 *p += readl_relaxed(reg); 509 *p += bp->macb_reg_readl(bp, offset);
460} 510}
461 511
462static int macb_halt_tx(struct macb *bp) 512static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
1107} 1157}
1108#endif 1158#endif
1109 1159
1110static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1111 unsigned int len)
1112{
1113 return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1114}
1115
1116static unsigned int macb_tx_map(struct macb *bp, 1160static unsigned int macb_tx_map(struct macb *bp,
1117 struct macb_queue *queue, 1161 struct macb_queue *queue,
1118 struct sk_buff *skb) 1162 struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1263 * socket buffer: skb fragments of jumbo frames may need to be 1307 * socket buffer: skb fragments of jumbo frames may need to be
1264 * splitted into many buffer descriptors. 1308 * splitted into many buffer descriptors.
1265 */ 1309 */
1266 count = macb_count_tx_descriptors(bp, skb_headlen(skb)); 1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1267 nr_frags = skb_shinfo(skb)->nr_frags; 1311 nr_frags = skb_shinfo(skb)->nr_frags;
1268 for (f = 0; f < nr_frags; f++) { 1312 for (f = 0; f < nr_frags; f++) {
1269 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1313 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1270 count += macb_count_tx_descriptors(bp, frag_size); 1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1271 } 1315 }
1272 1316
1273 spin_lock_irqsave(&bp->lock, flags); 1317 spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
1603static void macb_configure_dma(struct macb *bp) 1647static void macb_configure_dma(struct macb *bp)
1604{ 1648{
1605 u32 dmacfg; 1649 u32 dmacfg;
1606 u32 tmp, ncr;
1607 1650
1608 if (macb_is_gem(bp)) { 1651 if (macb_is_gem(bp)) {
1609 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
1613 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1656 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1614 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1657 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1615 1658
1616 /* Find the CPU endianness by using the loopback bit of net_ctrl 1659 if (bp->native_io)
1617 * register. save it first. When the CPU is in big endian we
1618 * need to program swaped mode for management descriptor access.
1619 */
1620 ncr = macb_readl(bp, NCR);
1621 __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
1622 tmp = __raw_readl(bp->regs + MACB_NCR);
1623
1624 if (tmp == MACB_BIT(LLB))
1625 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1660 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1626 else 1661 else
1627 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1662 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1628 1663
1629 /* Restore net_ctrl */
1630 macb_writel(bp, NCR, ncr);
1631
1632 if (bp->dev->features & NETIF_F_HW_CSUM) 1664 if (bp->dev->features & NETIF_F_HW_CSUM)
1633 dmacfg |= GEM_BIT(TXCOEN); 1665 dmacfg |= GEM_BIT(TXCOEN);
1634 else 1666 else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
1897 1929
1898static void gem_update_stats(struct macb *bp) 1930static void gem_update_stats(struct macb *bp)
1899{ 1931{
1900 int i; 1932 unsigned int i;
1901 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1933 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1902 1934
1903 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 1935 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1904 u32 offset = gem_statistics[i].offset; 1936 u32 offset = gem_statistics[i].offset;
1905 u64 val = readl_relaxed(bp->regs + offset); 1937 u64 val = bp->macb_reg_readl(bp, offset);
1906 1938
1907 bp->ethtool_stats[i] += val; 1939 bp->ethtool_stats[i] += val;
1908 *p += val; 1940 *p += val;
1909 1941
1910 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 1942 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1911 /* Add GEM_OCTTXH, GEM_OCTRXH */ 1943 /* Add GEM_OCTTXH, GEM_OCTRXH */
1912 val = readl_relaxed(bp->regs + offset + 4); 1944 val = bp->macb_reg_readl(bp, offset + 4);
1913 bp->ethtool_stats[i] += ((u64)val) << 32; 1945 bp->ethtool_stats[i] += ((u64)val) << 32;
1914 *(++p) += val; 1946 *(++p) += val;
1915 } 1947 }
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
1976 2008
1977static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2009static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1978{ 2010{
1979 int i; 2011 unsigned int i;
1980 2012
1981 switch (sset) { 2013 switch (sset) {
1982 case ETH_SS_STATS: 2014 case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2190 if (dt_conf) 2222 if (dt_conf)
2191 bp->caps = dt_conf->caps; 2223 bp->caps = dt_conf->caps;
2192 2224
2193 if (macb_is_gem_hw(bp->regs)) { 2225 if (hw_is_gem(bp->regs, bp->native_io)) {
2194 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2226 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2195 2227
2196 dcfg = gem_readl(bp, DCFG1); 2228 dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2201 bp->caps |= MACB_CAPS_FIFO_MODE; 2233 bp->caps |= MACB_CAPS_FIFO_MODE;
2202 } 2234 }
2203 2235
2204 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); 2236 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2205} 2237}
2206 2238
2207static void macb_probe_queues(void __iomem *mem, 2239static void macb_probe_queues(void __iomem *mem,
2240 bool native_io,
2208 unsigned int *queue_mask, 2241 unsigned int *queue_mask,
2209 unsigned int *num_queues) 2242 unsigned int *num_queues)
2210{ 2243{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
2219 * we are early in the probe process and don't have the 2252 * we are early in the probe process and don't have the
2220 * MACB_CAPS_MACB_IS_GEM flag positioned 2253 * MACB_CAPS_MACB_IS_GEM flag positioned
2221 */ 2254 */
2222 if (!macb_is_gem_hw(mem)) 2255 if (!hw_is_gem(mem, native_io))
2223 return; 2256 return;
2224 2257
2225 /* bit 0 is never set but queue 0 always exists */ 2258 /* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
2786 struct clk *pclk, *hclk, *tx_clk; 2819 struct clk *pclk, *hclk, *tx_clk;
2787 unsigned int queue_mask, num_queues; 2820 unsigned int queue_mask, num_queues;
2788 struct macb_platform_data *pdata; 2821 struct macb_platform_data *pdata;
2822 bool native_io;
2789 struct phy_device *phydev; 2823 struct phy_device *phydev;
2790 struct net_device *dev; 2824 struct net_device *dev;
2791 struct resource *regs; 2825 struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
2794 struct macb *bp; 2828 struct macb *bp;
2795 int err; 2829 int err;
2796 2830
2831 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2832 mem = devm_ioremap_resource(&pdev->dev, regs);
2833 if (IS_ERR(mem))
2834 return PTR_ERR(mem);
2835
2797 if (np) { 2836 if (np) {
2798 const struct of_device_id *match; 2837 const struct of_device_id *match;
2799 2838
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
2809 if (err) 2848 if (err)
2810 return err; 2849 return err;
2811 2850
2812 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2851 native_io = hw_is_native_io(mem);
2813 mem = devm_ioremap_resource(&pdev->dev, regs);
2814 if (IS_ERR(mem)) {
2815 err = PTR_ERR(mem);
2816 goto err_disable_clocks;
2817 }
2818 2852
2819 macb_probe_queues(mem, &queue_mask, &num_queues); 2853 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2820 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2854 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2821 if (!dev) { 2855 if (!dev) {
2822 err = -ENOMEM; 2856 err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
2831 bp->pdev = pdev; 2865 bp->pdev = pdev;
2832 bp->dev = dev; 2866 bp->dev = dev;
2833 bp->regs = mem; 2867 bp->regs = mem;
2868 bp->native_io = native_io;
2869 if (native_io) {
2870 bp->macb_reg_readl = hw_readl_native;
2871 bp->macb_reg_writel = hw_writel_native;
2872 } else {
2873 bp->macb_reg_readl = hw_readl;
2874 bp->macb_reg_writel = hw_writel;
2875 }
2834 bp->num_queues = num_queues; 2876 bp->num_queues = num_queues;
2835 bp->queue_mask = queue_mask; 2877 bp->queue_mask = queue_mask;
2836 if (macb_config) 2878 if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
2838 bp->pclk = pclk; 2880 bp->pclk = pclk;
2839 bp->hclk = hclk; 2881 bp->hclk = hclk;
2840 bp->tx_clk = tx_clk; 2882 bp->tx_clk = tx_clk;
2841 if (macb_config->jumbo_max_len) { 2883 if (macb_config)
2842 bp->jumbo_max_len = macb_config->jumbo_max_len; 2884 bp->jumbo_max_len = macb_config->jumbo_max_len;
2843 }
2844 2885
2845 spin_lock_init(&bp->lock); 2886 spin_lock_init(&bp->lock);
2846 2887
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
429 | GEM_BF(name, value)) 429 | GEM_BF(name, value))
430 430
431/* Register access macros */ 431/* Register access macros */
432#define macb_readl(port,reg) \ 432#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
433 readl_relaxed((port)->regs + MACB_##reg) 433#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
434#define macb_writel(port,reg,value) \ 434#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
435 writel_relaxed((value), (port)->regs + MACB_##reg) 435#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
436#define gem_readl(port, reg) \ 436#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
437 readl_relaxed((port)->regs + GEM_##reg) 437#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
438#define gem_writel(port, reg, value) \
439 writel_relaxed((value), (port)->regs + GEM_##reg)
440#define queue_readl(queue, reg) \
441 readl_relaxed((queue)->bp->regs + (queue)->reg)
442#define queue_writel(queue, reg, value) \
443 writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
444 438
445/* Conditional GEM/MACB macros. These perform the operation to the correct 439/* Conditional GEM/MACB macros. These perform the operation to the correct
446 * register dependent on whether the device is a GEM or a MACB. For registers 440 * register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
785 779
786struct macb { 780struct macb {
787 void __iomem *regs; 781 void __iomem *regs;
782 bool native_io;
783
784 /* hardware IO accessors */
785 u32 (*macb_reg_readl)(struct macb *bp, int offset);
786 void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
788 787
789 unsigned int rx_tail; 788 unsigned int rx_tail;
790 unsigned int rx_prepared_head; 789 unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
817 816
818 struct mii_bus *mii_bus; 817 struct mii_bus *mii_bus;
819 struct phy_device *phy_dev; 818 struct phy_device *phy_dev;
820 unsigned int link; 819 int link;
821 unsigned int speed; 820 int speed;
822 unsigned int duplex; 821 int duplex;
823 822
824 u32 caps; 823 u32 caps;
825 unsigned int dma_burst_length; 824 unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
843 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); 842 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
844} 843}
845 844
846static inline bool macb_is_gem_hw(void __iomem *addr)
847{
848 return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
849}
850
851#endif /* _MACB_H */ 845#endif /* _MACB_H */
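Taken together, the macb.c and macb.h hunks above replace the fixed readl_relaxed()/writel_relaxed() macros with calls through two per-device function pointers chosen once in probe from hw_is_native_io(); every register macro (macb_*, gem_*, queue_*) then becomes a single indirect call, so one driver binary handles both register-access flavours. A condensed sketch of the pattern; the bodies of the native accessors are not part of the hunks shown here, so treat them as an assumption used only to illustrate the indirection:

#include <linux/io.h>
#include <linux/types.h>

struct foo_hw {
        void __iomem *regs;
        bool native_io;
        u32  (*reg_readl)(struct foo_hw *hw, int offset);
        void (*reg_writel)(struct foo_hw *hw, int offset, u32 value);
};

static u32 hw_readl(struct foo_hw *hw, int offset)
{
        return readl_relaxed(hw->regs + offset);
}

static void hw_writel(struct foo_hw *hw, int offset, u32 value)
{
        writel_relaxed(value, hw->regs + offset);
}

/* assumed "native IO" variants, bypassing the little-endian accessors */
static u32 hw_readl_native(struct foo_hw *hw, int offset)
{
        return __raw_readl(hw->regs + offset);
}

static void hw_writel_native(struct foo_hw *hw, int offset, u32 value)
{
        __raw_writel(value, hw->regs + offset);
}

/* selected once at probe time, exactly as in the macb_probe() hunk */
static void foo_pick_accessors(struct foo_hw *hw)
{
        if (hw->native_io) {
                hw->reg_readl  = hw_readl_native;
                hw->reg_writel = hw_writel_native;
        } else {
                hw->reg_readl  = hw_readl;
                hw->reg_writel = hw_writel;
        }
}

/* the register macros collapse to one indirect call each */
#define foo_readl(hw, off)          (hw)->reg_readl((hw), (off))
#define foo_writel(hw, off, val)    (hw)->reg_writel((hw), (off), (val))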
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
16config THUNDER_NIC_PF 16config THUNDER_NIC_PF
17 tristate "Thunder Physical function driver" 17 tristate "Thunder Physical function driver"
18 depends on 64BIT 18 depends on 64BIT
19 default ARCH_THUNDER
20 select THUNDER_NIC_BGX 19 select THUNDER_NIC_BGX
21 ---help--- 20 ---help---
22 This driver supports Thunder's NIC physical function. 21 This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
29config THUNDER_NIC_VF 28config THUNDER_NIC_VF
30 tristate "Thunder Virtual function driver" 29 tristate "Thunder Virtual function driver"
31 depends on 64BIT 30 depends on 64BIT
32 default ARCH_THUNDER
33 ---help--- 31 ---help---
34 This driver supports Thunder's NIC virtual function 32 This driver supports Thunder's NIC virtual function
35 33
36config THUNDER_NIC_BGX 34config THUNDER_NIC_BGX
37 tristate "Thunder MAC interface driver (BGX)" 35 tristate "Thunder MAC interface driver (BGX)"
38 depends on 64BIT 36 depends on 64BIT
39 default ARCH_THUNDER
40 ---help--- 37 ---help---
41 This driver supports programming and controlling of MAC 38 This driver supports programming and controlling of MAC
42 interface from NIC physical function driver. 39 interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
125 */ 125 */
126#define NICPF_CLK_PER_INT_TICK 2 126#define NICPF_CLK_PER_INT_TICK 2
127 127
128/* Time to wait before we decide that a SQ is stuck.
129 *
130 * Since both pkt rx and tx notifications are done with same CQ,
131 * when packets are being received at very high rate (eg: L2 forwarding)
132 * then freeing transmitted skbs will be delayed and watchdog
133 * will kick in, resetting interface. Hence keeping this value high.
134 */
135#define NICVF_TX_TIMEOUT (50 * HZ)
136
128struct nicvf_cq_poll { 137struct nicvf_cq_poll {
129 u8 cq_idx; /* Completion queue index */ 138 u8 cq_idx; /* Completion queue index */
130 struct napi_struct napi; 139 struct napi_struct napi;
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
216 /* Tx */ 225 /* Tx */
217 u64 tx_frames_ok; 226 u64 tx_frames_ok;
218 u64 tx_drops; 227 u64 tx_drops;
219 u64 tx_busy;
220 u64 tx_tso; 228 u64 tx_tso;
229 u64 txq_stop;
230 u64 txq_wake;
221}; 231};
222 232
223struct nicvf { 233struct nicvf {
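NICVF_TX_TIMEOUT above only takes effect once it is assigned to netdev->watchdog_timeo, which a later hunk in this patch does in nicvf_probe(); after that, a transmit queue stopped for longer than the timeout makes the stack invoke the driver's ndo_tx_timeout handler. A hedged sketch of that wiring (hypothetical driver names; the handler just defers a reset to a work item, which is also how nicvf's reset_task is used):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

#define FOO_TX_TIMEOUT  (50 * HZ)  /* generous: tx completions share the CQ with rx */

struct foo_priv {
        struct work_struct reset_task;
};

static void foo_tx_timeout(struct net_device *netdev)
{
        struct foo_priv *priv = netdev_priv(netdev);

        /* called in softirq context; defer the heavy reset */
        schedule_work(&priv->reset_task);
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_tx_timeout = foo_tx_timeout,
};

/* in probe():
 *      netdev->netdev_ops     = &foo_netdev_ops;
 *      netdev->watchdog_timeo = FOO_TX_TIMEOUT;
 */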
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
66 NICVF_DRV_STAT(rx_frames_jumbo), 66 NICVF_DRV_STAT(rx_frames_jumbo),
67 NICVF_DRV_STAT(rx_drops), 67 NICVF_DRV_STAT(rx_drops),
68 NICVF_DRV_STAT(tx_frames_ok), 68 NICVF_DRV_STAT(tx_frames_ok),
69 NICVF_DRV_STAT(tx_busy),
70 NICVF_DRV_STAT(tx_tso), 69 NICVF_DRV_STAT(tx_tso),
71 NICVF_DRV_STAT(tx_drops), 70 NICVF_DRV_STAT(tx_drops),
71 NICVF_DRV_STAT(txq_stop),
72 NICVF_DRV_STAT(txq_wake),
72}; 73};
73 74
74static const struct nicvf_stat nicvf_queue_stats[] = { 75static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
126 127
127static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 128static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
128{ 129{
130 struct nicvf *nic = netdev_priv(netdev);
129 int stats, qidx; 131 int stats, qidx;
130 132
131 if (sset != ETH_SS_STATS) 133 if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
141 data += ETH_GSTRING_LEN; 143 data += ETH_GSTRING_LEN;
142 } 144 }
143 145
144 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 146 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
145 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 147 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
146 sprintf(data, "rxq%d: %s", qidx, 148 sprintf(data, "rxq%d: %s", qidx,
147 nicvf_queue_stats[stats].name); 149 nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
149 } 151 }
150 } 152 }
151 153
152 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 154 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
153 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 155 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
154 sprintf(data, "txq%d: %s", qidx, 156 sprintf(data, "txq%d: %s", qidx,
155 nicvf_queue_stats[stats].name); 157 nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
170 172
171static int nicvf_get_sset_count(struct net_device *netdev, int sset) 173static int nicvf_get_sset_count(struct net_device *netdev, int sset)
172{ 174{
175 struct nicvf *nic = netdev_priv(netdev);
176
173 if (sset != ETH_SS_STATS) 177 if (sset != ETH_SS_STATS)
174 return -EINVAL; 178 return -EINVAL;
175 179
176 return nicvf_n_hw_stats + nicvf_n_drv_stats + 180 return nicvf_n_hw_stats + nicvf_n_drv_stats +
177 (nicvf_n_queue_stats * 181 (nicvf_n_queue_stats *
178 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + 182 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
179 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; 183 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
180} 184}
181 185
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
197 *(data++) = ((u64 *)&nic->drv_stats) 201 *(data++) = ((u64 *)&nic->drv_stats)
198 [nicvf_drv_stats[stat].index]; 202 [nicvf_drv_stats[stat].index];
199 203
200 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 204 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
201 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 205 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
202 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) 206 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
203 [nicvf_queue_stats[stat].index]; 207 [nicvf_queue_stats[stat].index];
204 } 208 }
205 209
206 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 210 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
207 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 211 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
208 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) 212 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
209 [nicvf_queue_stats[stat].index]; 213 [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
543{ 547{
544 struct nicvf *nic = netdev_priv(dev); 548 struct nicvf *nic = netdev_priv(dev);
545 int err = 0; 549 int err = 0;
550 bool if_up = netif_running(dev);
546 551
547 if (!channel->rx_count || !channel->tx_count) 552 if (!channel->rx_count || !channel->tx_count)
548 return -EINVAL; 553 return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
551 if (channel->tx_count > MAX_SND_QUEUES_PER_QS) 556 if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
552 return -EINVAL; 557 return -EINVAL;
553 558
559 if (if_up)
560 nicvf_stop(dev);
561
554 nic->qs->rq_cnt = channel->rx_count; 562 nic->qs->rq_cnt = channel->rx_count;
555 nic->qs->sq_cnt = channel->tx_count; 563 nic->qs->sq_cnt = channel->tx_count;
556 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); 564 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
559 if (err) 567 if (err)
560 return err; 568 return err;
561 569
562 if (!netif_running(dev)) 570 if (if_up)
563 return err; 571 nicvf_open(dev);
564 572
565 nicvf_stop(dev);
566 nicvf_open(dev);
567 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 573 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
568 nic->qs->sq_cnt, nic->qs->rq_cnt); 574 nic->qs->sq_cnt, nic->qs->rq_cnt);
569 575
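The nicvf_set_channels() hunks above change the update from "reconfigure, then unconditionally stop/open" to "stop first if running, reconfigure, then reopen only if it was running", so queue counts are never reprogrammed under a live interface. A reduced sketch of that quiesce/restore shape for an ethtool handler (hypothetical names; the helper stubs stand in for the driver's real open/stop/apply paths):

#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct foo_priv {
        unsigned int rq_cnt;
        unsigned int sq_cnt;
};

/* hypothetical stand-ins */
static int foo_open(struct net_device *dev)              { return 0; }
static int foo_stop(struct net_device *dev)              { return 0; }
static int foo_apply_queue_config(struct foo_priv *priv) { return 0; }

static int foo_set_channels(struct net_device *dev,
                            struct ethtool_channels *ch)
{
        struct foo_priv *priv = netdev_priv(dev);
        bool if_up = netif_running(dev);
        int err;

        if (!ch->rx_count || !ch->tx_count)
                return -EINVAL;

        if (if_up)
                foo_stop(dev);          /* quiesce before touching queue counts */

        priv->rq_cnt = ch->rx_count;
        priv->sq_cnt = ch->tx_count;

        err = foo_apply_queue_config(priv);
        if (err)
                return err;

        if (if_up)
                foo_open(dev);          /* restore the previous running state */

        return 0;
}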
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
234 nic->duplex == DUPLEX_FULL ? 234 nic->duplex == DUPLEX_FULL ?
235 "Full duplex" : "Half duplex"); 235 "Full duplex" : "Half duplex");
236 netif_carrier_on(nic->netdev); 236 netif_carrier_on(nic->netdev);
237 netif_tx_wake_all_queues(nic->netdev); 237 netif_tx_start_all_queues(nic->netdev);
238 } else { 238 } else {
239 netdev_info(nic->netdev, "%s: Link is Down\n", 239 netdev_info(nic->netdev, "%s: Link is Down\n",
240 nic->netdev->name); 240 nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
425 if (skb) { 425 if (skb) {
426 prefetch(skb); 426 prefetch(skb);
427 dev_consume_skb_any(skb); 427 dev_consume_skb_any(skb);
428 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
428 } 429 }
429} 430}
430 431
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
476static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, 477static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
477 struct napi_struct *napi, int budget) 478 struct napi_struct *napi, int budget)
478{ 479{
479 int processed_cqe, work_done = 0; 480 int processed_cqe, work_done = 0, tx_done = 0;
480 int cqe_count, cqe_head; 481 int cqe_count, cqe_head;
481 struct nicvf *nic = netdev_priv(netdev); 482 struct nicvf *nic = netdev_priv(netdev);
482 struct queue_set *qs = nic->qs; 483 struct queue_set *qs = nic->qs;
483 struct cmp_queue *cq = &qs->cq[cq_idx]; 484 struct cmp_queue *cq = &qs->cq[cq_idx];
484 struct cqe_rx_t *cq_desc; 485 struct cqe_rx_t *cq_desc;
486 struct netdev_queue *txq;
485 487
486 spin_lock_bh(&cq->lock); 488 spin_lock_bh(&cq->lock);
487loop: 489loop:
@@ -496,8 +498,8 @@ loop:
496 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 498 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
497 cqe_head &= 0xFFFF; 499 cqe_head &= 0xFFFF;
498 500
499 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", 501 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
500 __func__, cqe_count, cqe_head); 502 __func__, cq_idx, cqe_count, cqe_head);
501 while (processed_cqe < cqe_count) { 503 while (processed_cqe < cqe_count) {
502 /* Get the CQ descriptor */ 504 /* Get the CQ descriptor */
503 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 505 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
511 break; 513 break;
512 } 514 }
513 515
514 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", 516 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
515 cq_desc->cqe_type); 517 cq_idx, cq_desc->cqe_type);
516 switch (cq_desc->cqe_type) { 518 switch (cq_desc->cqe_type) {
517 case CQE_TYPE_RX: 519 case CQE_TYPE_RX:
518 nicvf_rcv_pkt_handler(netdev, napi, cq, 520 nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
522 case CQE_TYPE_SEND: 524 case CQE_TYPE_SEND:
523 nicvf_snd_pkt_handler(netdev, cq, 525 nicvf_snd_pkt_handler(netdev, cq,
524 (void *)cq_desc, CQE_TYPE_SEND); 526 (void *)cq_desc, CQE_TYPE_SEND);
527 tx_done++;
525 break; 528 break;
526 case CQE_TYPE_INVALID: 529 case CQE_TYPE_INVALID:
527 case CQE_TYPE_RX_SPLIT: 530 case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
532 } 535 }
533 processed_cqe++; 536 processed_cqe++;
534 } 537 }
535 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", 538 netdev_dbg(nic->netdev,
536 __func__, processed_cqe, work_done, budget); 539 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
540 __func__, cq_idx, processed_cqe, work_done, budget);
537 541
538 /* Ring doorbell to inform H/W to reuse processed CQEs */ 542 /* Ring doorbell to inform H/W to reuse processed CQEs */
539 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 543 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
543 goto loop; 547 goto loop;
544 548
545done: 549done:
550 /* Wakeup TXQ if its stopped earlier due to SQ full */
551 if (tx_done) {
552 txq = netdev_get_tx_queue(netdev, cq_idx);
553 if (netif_tx_queue_stopped(txq)) {
554 netif_tx_start_queue(txq);
555 nic->drv_stats.txq_wake++;
556 if (netif_msg_tx_err(nic))
557 netdev_warn(netdev,
558 "%s: Transmit queue wakeup SQ%d\n",
559 netdev->name, cq_idx);
560 }
561 }
562
546 spin_unlock_bh(&cq->lock); 563 spin_unlock_bh(&cq->lock);
547 return work_done; 564 return work_done;
548} 565}
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
554 struct net_device *netdev = napi->dev; 571 struct net_device *netdev = napi->dev;
555 struct nicvf *nic = netdev_priv(netdev); 572 struct nicvf *nic = netdev_priv(netdev);
556 struct nicvf_cq_poll *cq; 573 struct nicvf_cq_poll *cq;
557 struct netdev_queue *txq;
558 574
559 cq = container_of(napi, struct nicvf_cq_poll, napi); 575 cq = container_of(napi, struct nicvf_cq_poll, napi);
560 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); 576 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
561 577
562 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
563 if (netif_tx_queue_stopped(txq))
564 netif_tx_wake_queue(txq);
565
566 if (work_done < budget) { 578 if (work_done < budget) {
567 /* Slow packet rate, exit polling */ 579 /* Slow packet rate, exit polling */
568 napi_complete(napi); 580 napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
833 return NETDEV_TX_OK; 845 return NETDEV_TX_OK;
834 } 846 }
835 847
836 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { 848 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
837 netif_tx_stop_queue(txq); 849 netif_tx_stop_queue(txq);
838 nic->drv_stats.tx_busy++; 850 nic->drv_stats.txq_stop++;
839 if (netif_msg_tx_err(nic)) 851 if (netif_msg_tx_err(nic))
840 netdev_warn(netdev, 852 netdev_warn(netdev,
841 "%s: Transmit ring full, stopping SQ%d\n", 853 "%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
859 nicvf_send_msg_to_pf(nic, &mbx); 871 nicvf_send_msg_to_pf(nic, &mbx);
860 872
861 netif_carrier_off(netdev); 873 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
863 874
864 /* Disable RBDR & QS error interrupts */ 875 /* Disable RBDR & QS error interrupts */
865 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 876 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
894 kfree(cq_poll); 905 kfree(cq_poll);
895 } 906 }
896 907
908 netif_tx_disable(netdev);
909
897 /* Free resources */ 910 /* Free resources */
898 nicvf_config_data_transfer(nic, false); 911 nicvf_config_data_transfer(nic, false);
899 912
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
988 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1001 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
989 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1002 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
990 1003
1004 nic->drv_stats.txq_stop = 0;
1005 nic->drv_stats.txq_wake = 0;
1006
991 netif_carrier_on(netdev); 1007 netif_carrier_on(netdev);
992 netif_tx_start_all_queues(netdev); 1008 netif_tx_start_all_queues(netdev);
993 1009
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1278 netdev->hw_features = netdev->features; 1294 netdev->hw_features = netdev->features;
1279 1295
1280 netdev->netdev_ops = &nicvf_netdev_ops; 1296 netdev->netdev_ops = &nicvf_netdev_ops;
1297 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1281 1298
1282 INIT_WORK(&nic->reset_task, nicvf_reset_task); 1299 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1283 1300
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
1318 pci_disable_device(pdev); 1335 pci_disable_device(pdev);
1319} 1336}
1320 1337
1338static void nicvf_shutdown(struct pci_dev *pdev)
1339{
1340 nicvf_remove(pdev);
1341}
1342
1321static struct pci_driver nicvf_driver = { 1343static struct pci_driver nicvf_driver = {
1322 .name = DRV_NAME, 1344 .name = DRV_NAME,
1323 .id_table = nicvf_id_table, 1345 .id_table = nicvf_id_table,
1324 .probe = nicvf_probe, 1346 .probe = nicvf_probe,
1325 .remove = nicvf_remove, 1347 .remove = nicvf_remove,
1348 .shutdown = nicvf_shutdown,
1326}; 1349};
1327 1350
1328static int __init nicvf_init_module(void) 1351static int __init nicvf_init_module(void)
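The nicvf_main.c hunks above pair every netif_tx_stop_queue() in the xmit path with a completion-driven restart: the CQ handler counts CQE_TYPE_SEND completions and wakes the matching transmit queue only when some were processed, instead of waking it unconditionally from nicvf_poll(), and both events are exported as the new txq_stop/txq_wake stats. A condensed sketch of the pairing (hypothetical names; locking, descriptor accounting and logging elided, and the stub append helper is an assumption):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
        struct { u64 txq_stop, txq_wake; } stats;
};

/* hypothetical stand-in: returns true if the frame fit into the SQ */
static bool foo_sq_append_skb(struct foo_priv *priv, struct sk_buff *skb)
{
        return true;
}

/* xmit: stop the queue when the send queue cannot take another frame */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct foo_priv *priv = netdev_priv(netdev);
        int qid = skb_get_queue_mapping(skb);
        struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);

        if (!netif_tx_queue_stopped(txq) && !foo_sq_append_skb(priv, skb)) {
                netif_tx_stop_queue(txq);       /* ring full */
                priv->stats.txq_stop++;
                return NETDEV_TX_BUSY;
        }
        return NETDEV_TX_OK;
}

/* completion path: wake the queue once send completions were reaped */
static void foo_tx_done(struct net_device *netdev, int cq_idx, int tx_done)
{
        struct foo_priv *priv = netdev_priv(netdev);
        struct netdev_queue *txq;

        if (!tx_done)
                return;

        txq = netdev_get_tx_queue(netdev, cq_idx);
        if (netif_tx_queue_stopped(txq)) {
                netif_tx_start_queue(txq);
                priv->stats.txq_wake++;
        }
}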
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
103 103
104 /* Allocate a new page */ 104 /* Allocate a new page */
105 if (!nic->rb_page) { 105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
107 if (!nic->rb_page) { 108 if (!nic->rb_page) {
108 netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); 109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
109 return -ENOMEM; 111 return -ENOMEM;
110 } 112 }
111 nic->rb_page_offset = 0; 113 nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
382 return; 384 return;
383 385
384 if (sq->tso_hdrs) 386 if (sq->tso_hdrs)
385 dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, 387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
386 sq->tso_hdrs, sq->tso_hdrs_phys); 389 sq->tso_hdrs, sq->tso_hdrs_phys);
387 390
388 kfree(sq->skbuff); 391 kfree(sq->skbuff);
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
863 continue; 866 continue;
864 } 867 }
865 skb = (struct sk_buff *)sq->skbuff[sq->head]; 868 skb = (struct sk_buff *)sq->skbuff[sq->head];
869 if (skb)
870 dev_kfree_skb_any(skb);
866 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 atomic64_add(hdr->tot_len, 872 atomic64_add(hdr->tot_len,
868 (atomic64_t *)&netdev->stats.tx_bytes); 873 (atomic64_t *)&netdev->stats.tx_bytes);
869 dev_kfree_skb_any(skb);
870 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 } 875 }
872} 876}
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 996
993 memset(gather, 0, SND_QUEUE_DESC_SIZE); 997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; 999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
996 gather->size = size; 1000 gather->size = size;
997 gather->addr = data; 1001 gather->addr = data;
998} 1002}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1048 } 1052 }
1049 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 seg_subdescs - 1, skb, seg_len); 1054 seg_subdescs - 1, skb, seg_len);
1051 sq->skbuff[hdr_qentry] = 0; 1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1052 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 1057
1054 desc_cnt += seg_subdescs; 1058 desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1062 /* Inform HW to xmit all TSO segments */ 1066 /* Inform HW to xmit all TSO segments */
1063 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 skb_get_queue_mapping(skb), desc_cnt); 1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1065 return 1; 1070 return 1;
1066} 1071}
1067 1072
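Two small correctness points in the nicvf_queues.c hunks above: the TSO header region is allocated as q_len * TSO_HEADER_SIZE bytes, so nicvf_free_snd_queue() must free it with that same size, and the SQ unwind path now checks the skb pointer before freeing it rather than afterwards. A short sketch of the alloc/free pairing rule the first fix restores (hypothetical structure, field names approximated):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <net/tso.h>            /* TSO_HEADER_SIZE */

struct foo_sq {
        u32        q_len;       /* number of SQ entries */
        void       *tso_hdrs;
        dma_addr_t tso_hdrs_phys;
};

static int foo_sq_alloc_tso_hdrs(struct device *dev, struct foo_sq *sq)
{
        /* one TSO header slot per SQ entry */
        sq->tso_hdrs = dma_alloc_coherent(dev, sq->q_len * TSO_HEADER_SIZE,
                                          &sq->tso_hdrs_phys, GFP_KERNEL);
        return sq->tso_hdrs ? 0 : -ENOMEM;
}

static void foo_sq_free_tso_hdrs(struct device *dev, struct foo_sq *sq)
{
        /* the size must match the allocation exactly */
        if (sq->tso_hdrs)
                dma_free_coherent(dev, sq->q_len * TSO_HEADER_SIZE,
                                  sq->tso_hdrs, sq->tso_hdrs_phys);
}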
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
62#define SND_QUEUE_CNT 8 62#define SND_QUEUE_CNT 8
63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ 63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
64 64
65#define SND_QSIZE SND_QUEUE_SIZE4 65#define SND_QSIZE SND_QUEUE_SIZE2
66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) 66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) 67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
68#define SND_QUEUE_THRESH 2ULL 68#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
70/* Since timestamp not enabled, otherwise 2 */ 70/* Since timestamp not enabled, otherwise 2 */
71#define MAX_CQE_PER_PKT_XMIT 1 71#define MAX_CQE_PER_PKT_XMIT 1
72 72
73#define CMP_QSIZE CMP_QUEUE_SIZE4 73/* Keep CQ and SQ sizes same, if timestamping
74 * is enabled this equation will change.
75 */
76#define CMP_QSIZE CMP_QUEUE_SIZE2
74#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
75#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
76#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
87 90
88#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 91#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
89 MAX_CQE_PER_PKT_XMIT) 92 MAX_CQE_PER_PKT_XMIT)
90#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) 93/* Calculate number of CQEs to reserve for all SQEs.
94 * Its 1/256th level of CQ size.
95 * '+ 1' to account for pipelining
96 */
97#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
98 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
91 99
92/* Descriptor size in bytes */ 100/* Descriptor size in bytes */
93#define SND_QUEUE_DESC_SIZE 16 101#define SND_QUEUE_DESC_SIZE 16
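For reference, the new RQ_CQ_DROP expression is independent of the absolute queue length as long as the CQ and SQ are kept the same size, which the comment above now requires. Assuming MIN_SQ_DESC_PER_PKT_XMIT is 2 (header plus one gather subdescriptor; defined earlier in this header, not visible in the hunk), the arithmetic works out as:

  MAX_CQES_FOR_TX = (SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * MAX_CQE_PER_PKT_XMIT
                  = (CMP_QUEUE_LEN / 2) * 1
  RQ_CQ_DROP      = 256 / (CMP_QUEUE_LEN / (CMP_QUEUE_LEN - CMP_QUEUE_LEN / 2)) + 1
                  = 256 / 2 + 1
                  = 129

So the programmed drop level corresponds to roughly half the CQ in 1/256 units (129/256), the extra unit being the pipelining slack mentioned in the comment.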
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
674 bgx_flush_dmac_addrs(bgx, lmacid); 674 bgx_flush_dmac_addrs(bgx, lmacid);
675 675
676 if (lmac->phydev) 676 if ((bgx->lmac_type != BGX_MODE_XFI) &&
677 (bgx->lmac_type != BGX_MODE_XLAUI) &&
678 (bgx->lmac_type != BGX_MODE_40G_KR) &&
679 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
677 phy_disconnect(lmac->phydev); 680 phy_disconnect(lmac->phydev);
678 681
679 lmac->phydev = NULL; 682 lmac->phydev = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 484eb8c37489..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -952,16 +952,23 @@ static int devlog_show(struct seq_file *seq, void *v)
952 * eventually have to put a format interpreter in here ... 952 * eventually have to put a format interpreter in here ...
953 */ 953 */
954 seq_printf(seq, "%10d %15llu %8s %8s ", 954 seq_printf(seq, "%10d %15llu %8s %8s ",
955 e->seqno, e->timestamp, 955 be32_to_cpu(e->seqno),
956 be64_to_cpu(e->timestamp),
956 (e->level < ARRAY_SIZE(devlog_level_strings) 957 (e->level < ARRAY_SIZE(devlog_level_strings)
957 ? devlog_level_strings[e->level] 958 ? devlog_level_strings[e->level]
958 : "UNKNOWN"), 959 : "UNKNOWN"),
959 (e->facility < ARRAY_SIZE(devlog_facility_strings) 960 (e->facility < ARRAY_SIZE(devlog_facility_strings)
960 ? devlog_facility_strings[e->facility] 961 ? devlog_facility_strings[e->facility]
961 : "UNKNOWN")); 962 : "UNKNOWN"));
962 seq_printf(seq, e->fmt, e->params[0], e->params[1], 963 seq_printf(seq, e->fmt,
963 e->params[2], e->params[3], e->params[4], 964 be32_to_cpu(e->params[0]),
964 e->params[5], e->params[6], e->params[7]); 965 be32_to_cpu(e->params[1]),
966 be32_to_cpu(e->params[2]),
967 be32_to_cpu(e->params[3]),
968 be32_to_cpu(e->params[4]),
969 be32_to_cpu(e->params[5]),
970 be32_to_cpu(e->params[6]),
971 be32_to_cpu(e->params[7]));
965 } 972 }
966 return 0; 973 return 0;
967} 974}
@@ -1043,23 +1050,17 @@ static int devlog_open(struct inode *inode, struct file *file)
1043 return ret; 1050 return ret;
1044 } 1051 }
1045 1052
1046 /* Translate log multi-byte integral elements into host native format 1053 /* Find the earliest (lowest Sequence Number) log entry in the
1047 * and determine where the first entry in the log is. 1054 * circular Device Log.
1048 */ 1055 */
1049 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) { 1056 for (fseqno = ~((u32)0), index = 0; index < dinfo->nentries; index++) {
1050 struct fw_devlog_e *e = &dinfo->log[index]; 1057 struct fw_devlog_e *e = &dinfo->log[index];
1051 int i;
1052 __u32 seqno; 1058 __u32 seqno;
1053 1059
1054 if (e->timestamp == 0) 1060 if (e->timestamp == 0)
1055 continue; 1061 continue;
1056 1062
1057 e->timestamp = (__force __be64)be64_to_cpu(e->timestamp);
1058 seqno = be32_to_cpu(e->seqno); 1063 seqno = be32_to_cpu(e->seqno);
1059 for (i = 0; i < 8; i++)
1060 e->params[i] =
1061 (__force __be32)be32_to_cpu(e->params[i]);
1062
1063 if (seqno < fseqno) { 1064 if (seqno < fseqno) {
1064 fseqno = seqno; 1065 fseqno = seqno;
1065 dinfo->first = index; 1066 dinfo->first = index;
@@ -2331,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
2331 EXT_MEM1_SIZE_G(size)); 2332 EXT_MEM1_SIZE_G(size));
2332 } 2333 }
2333 } else { 2334 } else {
2334 if (i & EXT_MEM_ENABLE_F) 2335 if (i & EXT_MEM_ENABLE_F) {
2335 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); 2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
2336 add_debugfs_mem(adap, "mc", MEM_MC, 2337 add_debugfs_mem(adap, "mc", MEM_MC,
2337 EXT_MEM_SIZE_G(size)); 2338 EXT_MEM_SIZE_G(size));
2339 }
2338 } 2340 }
2339 2341
2340 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, 2342 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
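The cxgb4 devlog hunks above stop rewriting the firmware log in place at open time (which corrupted the entries for subsequent readers and mixed __be types with host-endian values) and instead keep the buffer big-endian, converting each field with be32_to_cpu()/be64_to_cpu() only at the point where it is printed. A minimal sketch of that convert-at-the-edge pattern (hypothetical record layout):

#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_log_entry {
        __be64 timestamp;
        __be32 seqno;
        /* ... more big-endian fields, exactly as the firmware wrote them ... */
};

static void foo_show_entry(struct seq_file *seq, const struct fw_log_entry *e)
{
        /* the stored record stays big-endian; convert only for display */
        seq_printf(seq, "%10u %15llu\n",
                   be32_to_cpu(e->seqno),
                   be64_to_cpu(e->timestamp));
}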
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index da2004e2a741..918a8e42139b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1170,7 +1170,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1170 wq_work_done, 1170 wq_work_done,
1171 0 /* dont unmask intr */, 1171 0 /* dont unmask intr */,
1172 0 /* dont reset intr timer */); 1172 0 /* dont reset intr timer */);
1173 return rq_work_done; 1173 return budget;
1174 } 1174 }
1175 1175
1176 if (budget > 0) 1176 if (budget > 0)
@@ -1191,6 +1191,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
1191 0 /* don't reset intr timer */); 1191 0 /* don't reset intr timer */);
1192 1192
1193 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); 1193 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1194 enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
1194 1195
1195 /* Buffer allocation failed. Stay in polling 1196 /* Buffer allocation failed. Stay in polling
1196 * mode so we can try to fill the ring again. 1197 * mode so we can try to fill the ring again.
@@ -1208,7 +1209,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
1208 napi_complete(napi); 1209 napi_complete(napi);
1209 vnic_intr_unmask(&enic->intr[intr]); 1210 vnic_intr_unmask(&enic->intr[intr]);
1210 } 1211 }
1211 enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
1212 1212
1213 return rq_work_done; 1213 return rq_work_done;
1214} 1214}
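The enic_poll() fix above follows the NAPI contract: a poll routine that has not finished its work must return the full budget (here, when WQ work was done while the RQ was claimed for busy polling) so the core keeps it scheduled, and the busy-poll unlock is now taken on every exit path rather than only the last one. A reduced sketch of the budget contract (hypothetical helpers standing in for the real service code):

#include <linux/netdevice.h>

struct foo_rq {
        struct napi_struct napi;
        /* ... ring state ... */
};

/* hypothetical stand-ins for the driver's service and irq code */
static int  foo_service_rq(struct foo_rq *rq, int budget) { return 0; }
static bool foo_must_keep_polling(struct foo_rq *rq)      { return false; }
static void foo_unmask_irq(struct foo_rq *rq)             { }

static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_rq *rq = container_of(napi, struct foo_rq, napi);
        int work_done = foo_service_rq(rq, budget);

        if (foo_must_keep_polling(rq))
                return budget;          /* not finished: stay scheduled */

        if (work_done < budget) {
                napi_complete(napi);    /* finished: re-arm the interrupt */
                foo_unmask_irq(rq);
        }

        return work_done;
}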
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
621 BE_IF_FLAGS_MCAST_PROMISCUOUS) 621 BE_IF_FLAGS_MCAST_PROMISCUOUS)
622 622
623#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
624 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
625
626#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
627
623/* An RX interface is an object with one or more MAC addresses and 628/* An RX interface is an object with one or more MAC addresses and
624 * filtering capabilities. */ 629 * filtering capabilities. */
625struct be_cmd_req_if_create { 630struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..6ca693b03f33 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0; 274 return 0;
275 275
276 /* if device is not running, copy MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;
279
276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT 280 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address. 281 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the 282 * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
307 status = -EPERM; 311 status = -EPERM;
308 goto err; 312 goto err;
309 } 313 }
310 314done:
311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
312 dev_info(dev, "MAC address changed to %pM\n", mac); 316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
313 return 0; 317 return 0;
314err: 318err:
315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); 319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); 2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448} 2452}
2449 2453
2450static void be_rx_cq_clean(struct be_rx_obj *rxo) 2454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2451{ 2456{
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q; 2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
2454 struct be_queue_info *rx_cq = &rxo->cq; 2472 struct be_queue_info *rx_cq = &rxo->cq;
2455 struct be_rx_compl_info *rxcp; 2473 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter; 2474 struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2487 2505
2488 /* After cleanup, leave the CQ in unarmed state */ 2506 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0); 2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2490
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500} 2508}
2501 2509
2502static void be_tx_compl_clean(struct be_adapter *adapter) 2510static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2577 napi_hash_del(&eqo->napi); 2585 napi_hash_del(&eqo->napi);
2578 netif_napi_del(&eqo->napi); 2586 netif_napi_del(&eqo->napi);
2587 free_cpumask_var(eqo->affinity_mask);
2579 } 2588 }
2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q); 2589 be_queue_free(adapter, &eqo->q);
2582 } 2590 }
2583} 2591}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2594 2602
2595 for_all_evt_queues(adapter, eqo, i) { 2603 for_all_evt_queues(adapter, eqo, i) {
2596 int numa_node = dev_to_node(&adapter->pdev->dev); 2604 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2605
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
2603 napi_hash_add(&eqo->napi);
2604 aic = &adapter->aic_obj[i]; 2606 aic = &adapter->aic_obj[i];
2605 eqo->adapter = adapter; 2607 eqo->adapter = adapter;
2606 eqo->idx = i; 2608 eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2616 rc = be_cmd_eq_create(adapter, eqo); 2618 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc) 2619 if (rc)
2618 return rc; 2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
2619 } 2629 }
2620 return 0; 2630 return 0;
2621} 2631}
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3354 for_all_rx_queues(adapter, rxo, i) { 3364 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q; 3365 q = &rxo->q;
3356 if (q->created) { 3366 if (q->created) {
3367 /* If RXQs are destroyed while in an "out of buffer"
3368 * state, there is a possibility of an HW stall on
3369 * Lancer. So, post 64 buffers to each queue to relieve
3370 * the "out of buffer" condition.
3371 * Make sure there's space in the RXQ before posting.
3372 */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
3357 be_cmd_rxq_destroy(adapter, q); 3380 be_cmd_rxq_destroy(adapter, q);
3358 be_rx_cq_clean(rxo); 3381 be_rx_cq_clean(rxo);
3382 be_rxq_clean(rxo);
3359 } 3383 }
3360 be_queue_free(adapter, q); 3384 be_queue_free(adapter, q);
3361 } 3385 }
3362} 3386}
3363 3387
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396 * in the close path. When a VF gets detached from the host and
3397 * assigned to a VM the following happens:
3398 * - VF's IFACE flags get cleared in the detach path
3399 * - IFACE create is issued by the VF in the attach path
3400 * Due to a bug in the BE3/Skyhawk-R FW
3401 * (Lancer FW doesn't have the bug), the IFACE capability flags
3402 * specified along with the IFACE create cmd issued by a VF are not
3403 * honoured by FW. As a consequence, if a *new* driver
3404 * (that enables/disables IFACE flags in open/close)
3405 * is loaded in the host and an *old* driver is * used by a VM/VF,
3406 * the IFACE gets created *without* the needed flags.
3407 * To avoid this, disable RX-filter flags only for Lancer.
3408 */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
3364static int be_close(struct net_device *netdev) 3415static int be_close(struct net_device *netdev)
3365{ 3416{
3366 struct be_adapter *adapter = netdev_priv(netdev); 3417 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) 3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0; 3425 return 0;
3375 3426
3427 be_disable_if_filters(adapter);
3428
3376 be_roce_dev_close(adapter); 3429 be_roce_dev_close(adapter);
3377 3430
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
3392 be_tx_compl_clean(adapter); 3445 be_tx_compl_clean(adapter);
3393 3446
3394 be_rx_qs_destroy(adapter); 3447 be_rx_qs_destroy(adapter);
3395 be_clear_uc_list(adapter);
3396 3448
3397 for_all_evt_queues(adapter, eqo, i) { 3449 for_all_evt_queues(adapter, eqo, i) {
3398 if (msix_enabled(adapter)) 3450 if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3477 return 0; 3529 return 0;
3478} 3530}
3479 3531
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540 /* For BE3 VFs, the PF programs the initial MAC address */
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
3480static int be_open(struct net_device *netdev) 3557static int be_open(struct net_device *netdev)
3481{ 3558{
3482 struct be_adapter *adapter = netdev_priv(netdev); 3559 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
3490 if (status) 3567 if (status)
3491 goto err; 3568 goto err;
3492 3569
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
3493 status = be_irq_register(adapter); 3574 status = be_irq_register(adapter);
3494 if (status) 3575 if (status)
3495 goto err; 3576 goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
3686 } 3767 }
3687} 3768}
3688 3769
3689static void be_mac_clear(struct be_adapter *adapter)
3690{
3691 if (adapter->pmac_id) {
3692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
3694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
3699#ifdef CONFIG_BE2NET_VXLAN 3770#ifdef CONFIG_BE2NET_VXLAN
3700static void be_disable_vxlan_offloads(struct be_adapter *adapter) 3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3701{ 3772{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
3770#ifdef CONFIG_BE2NET_VXLAN 3841#ifdef CONFIG_BE2NET_VXLAN
3771 be_disable_vxlan_offloads(adapter); 3842 be_disable_vxlan_offloads(adapter);
3772#endif 3843#endif
3773 /* delete the primary mac along with the uc-mac list */ 3844 kfree(adapter->pmac_id);
3774 be_mac_clear(adapter); 3845 adapter->pmac_id = NULL;
3775 3846
3776 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3777 3848
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
3782 return 0; 3853 return 0;
3783} 3854}
3784 3855
3785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
3789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3793
3794 en_flags &= cap_flags;
3795
3796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3797}
3798
3799static int be_vfs_if_create(struct be_adapter *adapter) 3856static int be_vfs_if_create(struct be_adapter *adapter)
3800{ 3857{
3801 struct be_resources res = {0}; 3858 struct be_resources res = {0};
3859 u32 cap_flags, en_flags, vf;
3802 struct be_vf_cfg *vf_cfg; 3860 struct be_vf_cfg *vf_cfg;
3803 u32 cap_flags, vf;
3804 int status; 3861 int status;
3805 3862
3806 /* If a FW profile exists, then cap_flags are updated */ 3863 /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3821 } 3878 }
3822 } 3879 }
3823 3880
3824 status = be_if_create(adapter, &vf_cfg->if_handle, 3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3825 cap_flags, vf + 1); 3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
3826 if (status) 3887 if (status)
3827 return status; 3888 return status;
3828 } 3889 }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
4194 4255
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 } 4258 }
4201 4259
4202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
4206 return 0; 4260 return 0;
4207} 4261}
4208 4262
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
4342static int be_setup(struct be_adapter *adapter) 4396static int be_setup(struct be_adapter *adapter)
4343{ 4397{
4344 struct device *dev = &adapter->pdev->dev; 4398 struct device *dev = &adapter->pdev->dev;
4399 u32 en_flags;
4345 int status; 4400 int status;
4346 4401
4347 status = be_func_init(adapter); 4402 status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
4364 if (status) 4419 if (status)
4365 goto err; 4420 goto err;
4366 4421
4367 status = be_if_create(adapter, &adapter->if_handle, 4422 /* will enable all the needed filter flags in be_open() */
4368 be_if_cap_flags(adapter), 0); 4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
4369 if (status) 4427 if (status)
4370 goto err; 4428 goto err;
4371 4429
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
4391 dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); 4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4392 } 4450 }
4393 4451
4394 if (adapter->vlans_added)
4395 be_vid_config(adapter);
4396
4397 be_set_rx_mode(adapter->netdev);
4398
4399 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4400 adapter->rx_fc); 4453 adapter->rx_fc);
4401 if (status) 4454 if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5121 struct device *dev = &adapter->pdev->dev; 5174 struct device *dev = &adapter->pdev->dev;
5122 int status; 5175 int status;
5123 5176
5124 if (lancer_chip(adapter) || BEx_chip(adapter)) 5177 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5125 return; 5178 return;
5126 5179
5127 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { 5180 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5168{ 5221{
5169 struct be_adapter *adapter = netdev_priv(netdev); 5222 struct be_adapter *adapter = netdev_priv(netdev);
5170 5223
5171 if (lancer_chip(adapter) || BEx_chip(adapter)) 5224 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5172 return; 5225 return;
5173 5226
5174 if (adapter->vxlan_port != port) 5227 if (adapter->vxlan_port != port)
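The be_main.c rework above makes RX-filter programming symmetric with the interface state: be_open() now calls the new be_enable_if_filters() (basic filter flags, primary MAC via pmac_add, VLANs, rx-mode) and be_close() calls be_disable_if_filters(), while be_setup() creates the interface with only the RSS-related flags enabled; be_rx_qs_destroy() also gains a Lancer-only workaround that reposts buffers before destroying an RXQ that ran dry. A compressed sketch of the open/close symmetry (hypothetical adapter and helpers):

#include <linux/netdevice.h>

struct foo_adapter {
        u32 if_flags;
};

/* hypothetical stand-ins for the driver's queue and filter helpers */
static int  foo_rx_qs_create(struct foo_adapter *adapter)       { return 0; }
static void foo_rx_qs_destroy(struct foo_adapter *adapter)      { }
static int  foo_enable_if_filters(struct foo_adapter *adapter)  { return 0; }
static void foo_disable_if_filters(struct foo_adapter *adapter) { }

static int foo_open(struct net_device *netdev)
{
        struct foo_adapter *adapter = netdev_priv(netdev);
        int status;

        status = foo_rx_qs_create(adapter);
        if (status)
                return status;

        /* MAC, VLAN table and rx-mode are programmed only while open */
        status = foo_enable_if_filters(adapter);
        if (status)
                return status;

        netif_tx_start_all_queues(netdev);
        return 0;
}

static int foo_close(struct net_device *netdev)
{
        struct foo_adapter *adapter = netdev_priv(netdev);

        /* undo exactly what foo_open() programmed, before queue teardown */
        foo_disable_if_filters(adapter);
        netif_tx_disable(netdev);
        foo_rx_qs_destroy(adapter);
        return 0;
}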
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
562}; 562};
563 563
564void fec_ptp_init(struct platform_device *pdev); 564void fec_ptp_init(struct platform_device *pdev);
565void fec_ptp_stop(struct platform_device *pdev);
565void fec_ptp_start_cyclecounter(struct net_device *ndev); 566void fec_ptp_start_cyclecounter(struct net_device *ndev);
566int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 567int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
567int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 568int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1f89c59b4353..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/string.h> 26#include <linux/string.h>
27#include <linux/pm_runtime.h>
27#include <linux/ptrace.h> 28#include <linux/ptrace.h>
28#include <linux/errno.h> 29#include <linux/errno.h>
29#include <linux/ioport.h> 30#include <linux/ioport.h>
@@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
77#define FEC_ENET_RAEM_V 0x8 78#define FEC_ENET_RAEM_V 0x8
78#define FEC_ENET_RAFL_V 0x8 79#define FEC_ENET_RAFL_V 0x8
79#define FEC_ENET_OPD_V 0xFFF0 80#define FEC_ENET_OPD_V 0xFFF0
81#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
80 82
81static struct platform_device_id fec_devtype[] = { 83static struct platform_device_id fec_devtype[] = {
82 { 84 {
@@ -1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev)
1767static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1769static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1768{ 1770{
1769 struct fec_enet_private *fep = bus->priv; 1771 struct fec_enet_private *fep = bus->priv;
1772 struct device *dev = &fep->pdev->dev;
1770 unsigned long time_left; 1773 unsigned long time_left;
1774 int ret = 0;
1775
1776 ret = pm_runtime_get_sync(dev);
1777 if (IS_ERR_VALUE(ret))
1778 return ret;
1771 1779
1772 fep->mii_timeout = 0; 1780 fep->mii_timeout = 0;
1773 init_completion(&fep->mdio_done); 1781 init_completion(&fep->mdio_done);
@@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
1783 if (time_left == 0) { 1791 if (time_left == 0) {
1784 fep->mii_timeout = 1; 1792 fep->mii_timeout = 1;
1785 netdev_err(fep->netdev, "MDIO read timeout\n"); 1793 netdev_err(fep->netdev, "MDIO read timeout\n");
1786 return -ETIMEDOUT; 1794 ret = -ETIMEDOUT;
1795 goto out;
1787 } 1796 }
1788 1797
1789 /* return value */ 1798 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
1790 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1799
1800out:
1801 pm_runtime_mark_last_busy(dev);
1802 pm_runtime_put_autosuspend(dev);
1803
1804 return ret;
1791} 1805}
1792 1806
1793static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1807static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1794 u16 value) 1808 u16 value)
1795{ 1809{
1796 struct fec_enet_private *fep = bus->priv; 1810 struct fec_enet_private *fep = bus->priv;
1811 struct device *dev = &fep->pdev->dev;
1797 unsigned long time_left; 1812 unsigned long time_left;
1813 int ret = 0;
1814
1815 ret = pm_runtime_get_sync(dev);
1816 if (IS_ERR_VALUE(ret))
1817 return ret;
1798 1818
1799 fep->mii_timeout = 0; 1819 fep->mii_timeout = 0;
1800 init_completion(&fep->mdio_done); 1820 init_completion(&fep->mdio_done);
@@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
1811 if (time_left == 0) { 1831 if (time_left == 0) {
1812 fep->mii_timeout = 1; 1832 fep->mii_timeout = 1;
1813 netdev_err(fep->netdev, "MDIO write timeout\n"); 1833 netdev_err(fep->netdev, "MDIO write timeout\n");
1814 return -ETIMEDOUT; 1834 ret = -ETIMEDOUT;
1815 } 1835 }
1816 1836
1817 return 0; 1837 pm_runtime_mark_last_busy(dev);
1838 pm_runtime_put_autosuspend(dev);
1839
1840 return ret;
1818} 1841}
1819 1842
1820static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 1843static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
@@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1826 ret = clk_prepare_enable(fep->clk_ahb); 1849 ret = clk_prepare_enable(fep->clk_ahb);
1827 if (ret) 1850 if (ret)
1828 return ret; 1851 return ret;
1829 ret = clk_prepare_enable(fep->clk_ipg);
1830 if (ret)
1831 goto failed_clk_ipg;
1832 if (fep->clk_enet_out) { 1852 if (fep->clk_enet_out) {
1833 ret = clk_prepare_enable(fep->clk_enet_out); 1853 ret = clk_prepare_enable(fep->clk_enet_out);
1834 if (ret) 1854 if (ret)
@@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1852 } 1872 }
1853 } else { 1873 } else {
1854 clk_disable_unprepare(fep->clk_ahb); 1874 clk_disable_unprepare(fep->clk_ahb);
1855 clk_disable_unprepare(fep->clk_ipg);
1856 if (fep->clk_enet_out) 1875 if (fep->clk_enet_out)
1857 clk_disable_unprepare(fep->clk_enet_out); 1876 clk_disable_unprepare(fep->clk_enet_out);
1858 if (fep->clk_ptp) { 1877 if (fep->clk_ptp) {
@@ -1874,8 +1893,6 @@ failed_clk_ptp:
1874 if (fep->clk_enet_out) 1893 if (fep->clk_enet_out)
1875 clk_disable_unprepare(fep->clk_enet_out); 1894 clk_disable_unprepare(fep->clk_enet_out);
1876failed_clk_enet_out: 1895failed_clk_enet_out:
1877 clk_disable_unprepare(fep->clk_ipg);
1878failed_clk_ipg:
1879 clk_disable_unprepare(fep->clk_ahb); 1896 clk_disable_unprepare(fep->clk_ahb);
1880 1897
1881 return ret; 1898 return ret;
@@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev)
2847 struct fec_enet_private *fep = netdev_priv(ndev); 2864 struct fec_enet_private *fep = netdev_priv(ndev);
2848 int ret; 2865 int ret;
2849 2866
2867 ret = pm_runtime_get_sync(&fep->pdev->dev);
2868 if (IS_ERR_VALUE(ret))
2869 return ret;
2870
2850 pinctrl_pm_select_default_state(&fep->pdev->dev); 2871 pinctrl_pm_select_default_state(&fep->pdev->dev);
2851 ret = fec_enet_clk_enable(ndev, true); 2872 ret = fec_enet_clk_enable(ndev, true);
2852 if (ret) 2873 if (ret)
2853 return ret; 2874 goto clk_enable;
2854 2875
2855 /* I should reset the ring buffers here, but I don't yet know 2876 /* I should reset the ring buffers here, but I don't yet know
2856 * a simple way to do that. 2877 * a simple way to do that.
@@ -2881,6 +2902,9 @@ err_enet_mii_probe:
2881 fec_enet_free_buffers(ndev); 2902 fec_enet_free_buffers(ndev);
2882err_enet_alloc: 2903err_enet_alloc:
2883 fec_enet_clk_enable(ndev, false); 2904 fec_enet_clk_enable(ndev, false);
2905clk_enable:
2906 pm_runtime_mark_last_busy(&fep->pdev->dev);
2907 pm_runtime_put_autosuspend(&fep->pdev->dev);
2884 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2908 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2885 return ret; 2909 return ret;
2886} 2910}
@@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev)
2903 2927
2904 fec_enet_clk_enable(ndev, false); 2928 fec_enet_clk_enable(ndev, false);
2905 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2929 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
2930 pm_runtime_mark_last_busy(&fep->pdev->dev);
2931 pm_runtime_put_autosuspend(&fep->pdev->dev);
2932
2906 fec_enet_free_buffers(ndev); 2933 fec_enet_free_buffers(ndev);
2907 2934
2908 return 0; 2935 return 0;
@@ -3115,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
3115 fep->bufdesc_size; 3142 fep->bufdesc_size;
3116 3143
3117 /* Allocate memory for buffer descriptors. */ 3144 /* Allocate memory for buffer descriptors. */
3118 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 3145 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3119 GFP_KERNEL); 3146 GFP_KERNEL);
3120 if (!cbd_base) { 3147 if (!cbd_base) {
3121 return -ENOMEM; 3148 return -ENOMEM;
3122 } 3149 }
@@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev)
3388 if (ret) 3415 if (ret)
3389 goto failed_clk; 3416 goto failed_clk;
3390 3417
3418 ret = clk_prepare_enable(fep->clk_ipg);
3419 if (ret)
3420 goto failed_clk_ipg;
3421
3391 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3422 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
3392 if (!IS_ERR(fep->reg_phy)) { 3423 if (!IS_ERR(fep->reg_phy)) {
3393 ret = regulator_enable(fep->reg_phy); 3424 ret = regulator_enable(fep->reg_phy);
@@ -3400,6 +3431,12 @@ fec_probe(struct platform_device *pdev)
3400 fep->reg_phy = NULL; 3431 fep->reg_phy = NULL;
3401 } 3432 }
3402 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_get_noresume(&pdev->dev);
3437 pm_runtime_set_active(&pdev->dev);
3438 pm_runtime_enable(&pdev->dev);
3439
3403 fec_reset_phy(pdev); 3440 fec_reset_phy(pdev);
3404 3441
3405 if (fep->bufdesc_ex) 3442 if (fep->bufdesc_ex)
@@ -3447,6 +3484,10 @@ fec_probe(struct platform_device *pdev)
3447 3484
3448 fep->rx_copybreak = COPYBREAK_DEFAULT; 3485 fep->rx_copybreak = COPYBREAK_DEFAULT;
3449 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3486 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3487
3488 pm_runtime_mark_last_busy(&pdev->dev);
3489 pm_runtime_put_autosuspend(&pdev->dev);
3490
3450 return 0; 3491 return 0;
3451 3492
3452failed_register: 3493failed_register:
@@ -3454,9 +3495,12 @@ failed_register:
3454failed_mii_init: 3495failed_mii_init:
3455failed_irq: 3496failed_irq:
3456failed_init: 3497failed_init:
3498 fec_ptp_stop(pdev);
3457 if (fep->reg_phy) 3499 if (fep->reg_phy)
3458 regulator_disable(fep->reg_phy); 3500 regulator_disable(fep->reg_phy);
3459failed_regulator: 3501failed_regulator:
3502 clk_disable_unprepare(fep->clk_ipg);
3503failed_clk_ipg:
3460 fec_enet_clk_enable(ndev, false); 3504 fec_enet_clk_enable(ndev, false);
3461failed_clk: 3505failed_clk:
3462failed_phy: 3506failed_phy:
@@ -3473,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev)
3473 struct net_device *ndev = platform_get_drvdata(pdev); 3517 struct net_device *ndev = platform_get_drvdata(pdev);
3474 struct fec_enet_private *fep = netdev_priv(ndev); 3518 struct fec_enet_private *fep = netdev_priv(ndev);
3475 3519
3476 cancel_delayed_work_sync(&fep->time_keep);
3477 cancel_work_sync(&fep->tx_timeout_work); 3520 cancel_work_sync(&fep->tx_timeout_work);
3521 fec_ptp_stop(pdev);
3478 unregister_netdev(ndev); 3522 unregister_netdev(ndev);
3479 fec_enet_mii_remove(fep); 3523 fec_enet_mii_remove(fep);
3480 if (fep->reg_phy) 3524 if (fep->reg_phy)
3481 regulator_disable(fep->reg_phy); 3525 regulator_disable(fep->reg_phy);
3482 if (fep->ptp_clock)
3483 ptp_clock_unregister(fep->ptp_clock);
3484 of_node_put(fep->phy_node); 3526 of_node_put(fep->phy_node);
3485 free_netdev(ndev); 3527 free_netdev(ndev);
3486 3528
@@ -3568,7 +3610,28 @@ failed_clk:
3568 return ret; 3610 return ret;
3569} 3611}
3570 3612
3571static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); 3613static int __maybe_unused fec_runtime_suspend(struct device *dev)
3614{
3615 struct net_device *ndev = dev_get_drvdata(dev);
3616 struct fec_enet_private *fep = netdev_priv(ndev);
3617
3618 clk_disable_unprepare(fep->clk_ipg);
3619
3620 return 0;
3621}
3622
3623static int __maybe_unused fec_runtime_resume(struct device *dev)
3624{
3625 struct net_device *ndev = dev_get_drvdata(dev);
3626 struct fec_enet_private *fep = netdev_priv(ndev);
3627
3628 return clk_prepare_enable(fep->clk_ipg);
3629}
3630
3631static const struct dev_pm_ops fec_pm_ops = {
3632 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
3633 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
3634};
3572 3635
3573static struct platform_driver fec_driver = { 3636static struct platform_driver fec_driver = {
3574 .driver = { 3637 .driver = {
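The fec_main.c hunks above convert the MDIO/IPG clock handling to runtime PM with autosuspend: probe sets an autosuspend delay, marks the device active and later drops its initial reference with mark_last_busy()/put_autosuspend(); ndo_open takes a reference with pm_runtime_get_sync() before touching pinctrl and clocks, its error path and ndo_close release it the same way, and the new runtime suspend/resume callbacks gate clk_ipg. Below is a minimal sketch of that pattern using hypothetical my_* names; it is not the FEC code itself.

#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Illustrative private data; the real driver keeps much more in here. */
struct my_priv {
	struct platform_device *pdev;
};

static int my_open(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);
	int ret;

	/* Resume the device (and its runtime-PM managed clock) first. */
	ret = pm_runtime_get_sync(&priv->pdev->dev);
	if (ret < 0)
		goto err_put;

	/* ... enable clocks, allocate buffers, start the MAC ... */
	return 0;

err_put:
	/* get_sync() bumps the usage count even on failure, so drop it. */
	pm_runtime_mark_last_busy(&priv->pdev->dev);
	pm_runtime_put_autosuspend(&priv->pdev->dev);
	return ret;
}

static int my_close(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* ... stop the MAC, disable clocks ... */
	pm_runtime_mark_last_busy(&priv->pdev->dev);
	pm_runtime_put_autosuspend(&priv->pdev->dev);
	return 0;
}

static void my_probe_setup_pm(struct platform_device *pdev)
{
	/* Let the device autosuspend after a short idle period, and hand it
	 * to the PM core already powered so probe can finish its setup.
	 */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 100 /* ms */);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
}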
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
604 schedule_delayed_work(&fep->time_keep, HZ); 604 schedule_delayed_work(&fep->time_keep, HZ);
605} 605}
606 606
607void fec_ptp_stop(struct platform_device *pdev)
608{
609 struct net_device *ndev = platform_get_drvdata(pdev);
610 struct fec_enet_private *fep = netdev_priv(ndev);
611
612 cancel_delayed_work_sync(&fep->time_keep);
613 if (fep->ptp_clock)
614 ptp_clock_unregister(fep->ptp_clock);
615}
616
607/** 617/**
608 * fec_ptp_check_pps_event 618 * fec_ptp_check_pps_event
609 * @fep: the fec_enet_private structure handle 619 * @fep: the fec_enet_private structure handle
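The new fec_ptp_stop() above gives the driver one helper that stops the time-keeping worker and unregisters the PTP clock; fec_drv_remove() and the fec_probe() error path now call it instead of open-coding the two steps. A sketch of the same teardown pairing, with illustrative names rather than the FEC symbols:

#include <linux/workqueue.h>
#include <linux/ptp_clock_kernel.h>

/* Illustrative private data; the real driver keeps these in fec_enet_private. */
struct my_ptp_priv {
	struct delayed_work time_keep;
	struct ptp_clock *ptp_clock;
};

static void my_ptp_stop(struct my_ptp_priv *priv)
{
	/* Stop the periodic worker first so it cannot re-arm itself... */
	cancel_delayed_work_sync(&priv->time_keep);

	/* ...then it is safe to drop the PTP clock, if one was registered. */
	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);
}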
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
586 frag = skb_shinfo(skb)->frags; 586 frag = skb_shinfo(skb)->frags;
587 while (nr_frags) { 587 while (nr_frags) {
588 CBDC_SC(bdp, 588 CBDC_SC(bdp,
589 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); 589 BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
590 BD_ENET_TX_TC);
590 CBDS_SC(bdp, BD_ENET_TX_READY); 591 CBDS_SC(bdp, BD_ENET_TX_READY);
591 592
592 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 593 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
110} 110}
111 111
112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) 112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) 113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
114#define FEC_RX_EVENT (FEC_ENET_RXF) 114#define FEC_RX_EVENT (FEC_ENET_RXF)
115#define FEC_TX_EVENT (FEC_ENET_TXF) 115#define FEC_TX_EVENT (FEC_ENET_TXF)
116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ 116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..10b3bbbbac8e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
565 } 565 }
566} 566}
567 567
568static void lock_tx_qs(struct gfar_private *priv)
569{
570 int i;
571
572 for (i = 0; i < priv->num_tx_queues; i++)
573 spin_lock(&priv->tx_queue[i]->txlock);
574}
575
576static void unlock_tx_qs(struct gfar_private *priv)
577{
578 int i;
579
580 for (i = 0; i < priv->num_tx_queues; i++)
581 spin_unlock(&priv->tx_queue[i]->txlock);
582}
583
584static int gfar_alloc_tx_queues(struct gfar_private *priv) 568static int gfar_alloc_tx_queues(struct gfar_private *priv)
585{ 569{
586 int i; 570 int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
1376 priv->dev = &ofdev->dev; 1360 priv->dev = &ofdev->dev;
1377 SET_NETDEV_DEV(dev, &ofdev->dev); 1361 SET_NETDEV_DEV(dev, &ofdev->dev);
1378 1362
1379 spin_lock_init(&priv->bflock);
1380 INIT_WORK(&priv->reset_task, gfar_reset_task); 1363 INIT_WORK(&priv->reset_task, gfar_reset_task);
1381 1364
1382 platform_set_drvdata(ofdev, priv); 1365 platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
1470 goto register_fail; 1453 goto register_fail;
1471 } 1454 }
1472 1455
1473 device_init_wakeup(&dev->dev, 1456 device_set_wakeup_capable(&dev->dev, priv->device_flags &
1474 priv->device_flags & 1457 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1475 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1476 1458
1477 /* fill out IRQ number and name fields */ 1459 /* fill out IRQ number and name fields */
1478 for (i = 0; i < priv->num_grps; i++) { 1460 for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
1540 struct gfar_private *priv = dev_get_drvdata(dev); 1522 struct gfar_private *priv = dev_get_drvdata(dev);
1541 struct net_device *ndev = priv->ndev; 1523 struct net_device *ndev = priv->ndev;
1542 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1543 unsigned long flags;
1544 u32 tempval; 1525 u32 tempval;
1545
1546 int magic_packet = priv->wol_en && 1526 int magic_packet = priv->wol_en &&
1547 (priv->device_flags & 1527 (priv->device_flags &
1548 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1528 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1549 1529
1530 if (!netif_running(ndev))
1531 return 0;
1532
1533 disable_napi(priv);
1534 netif_tx_lock(ndev);
1550 netif_device_detach(ndev); 1535 netif_device_detach(ndev);
1536 netif_tx_unlock(ndev);
1551 1537
1552 if (netif_running(ndev)) { 1538 gfar_halt(priv);
1553 1539
1554 local_irq_save(flags); 1540 if (magic_packet) {
1555 lock_tx_qs(priv); 1541 /* Enable interrupt on Magic Packet */
1542 gfar_write(&regs->imask, IMASK_MAG);
1556 1543
1557 gfar_halt_nodisable(priv); 1544 /* Enable Magic Packet mode */
1545 tempval = gfar_read(&regs->maccfg2);
1546 tempval |= MACCFG2_MPEN;
1547 gfar_write(&regs->maccfg2, tempval);
1558 1548
1559 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1549 /* re-enable the Rx block */
1560 tempval = gfar_read(&regs->maccfg1); 1550 tempval = gfar_read(&regs->maccfg1);
1561 1551 tempval |= MACCFG1_RX_EN;
1562 tempval &= ~MACCFG1_TX_EN;
1563
1564 if (!magic_packet)
1565 tempval &= ~MACCFG1_RX_EN;
1566
1567 gfar_write(&regs->maccfg1, tempval); 1552 gfar_write(&regs->maccfg1, tempval);
1568 1553
1569 unlock_tx_qs(priv); 1554 } else {
1570 local_irq_restore(flags); 1555 phy_stop(priv->phydev);
1571
1572 disable_napi(priv);
1573
1574 if (magic_packet) {
1575 /* Enable interrupt on Magic Packet */
1576 gfar_write(&regs->imask, IMASK_MAG);
1577
1578 /* Enable Magic Packet mode */
1579 tempval = gfar_read(&regs->maccfg2);
1580 tempval |= MACCFG2_MPEN;
1581 gfar_write(&regs->maccfg2, tempval);
1582 } else {
1583 phy_stop(priv->phydev);
1584 }
1585 } 1556 }
1586 1557
1587 return 0; 1558 return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
1592 struct gfar_private *priv = dev_get_drvdata(dev); 1563 struct gfar_private *priv = dev_get_drvdata(dev);
1593 struct net_device *ndev = priv->ndev; 1564 struct net_device *ndev = priv->ndev;
1594 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1565 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1595 unsigned long flags;
1596 u32 tempval; 1566 u32 tempval;
1597 int magic_packet = priv->wol_en && 1567 int magic_packet = priv->wol_en &&
1598 (priv->device_flags & 1568 (priv->device_flags &
1599 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1569 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1600 1570
1601 if (!netif_running(ndev)) { 1571 if (!netif_running(ndev))
1602 netif_device_attach(ndev);
1603 return 0; 1572 return 0;
1604 }
1605 1573
1606 if (!magic_packet && priv->phydev) 1574 if (magic_packet) {
1575 /* Disable Magic Packet mode */
1576 tempval = gfar_read(&regs->maccfg2);
1577 tempval &= ~MACCFG2_MPEN;
1578 gfar_write(&regs->maccfg2, tempval);
1579 } else {
1607 phy_start(priv->phydev); 1580 phy_start(priv->phydev);
1608 1581 }
1609 /* Disable Magic Packet mode, in case something
1610 * else woke us up.
1611 */
1612 local_irq_save(flags);
1613 lock_tx_qs(priv);
1614
1615 tempval = gfar_read(&regs->maccfg2);
1616 tempval &= ~MACCFG2_MPEN;
1617 gfar_write(&regs->maccfg2, tempval);
1618 1582
1619 gfar_start(priv); 1583 gfar_start(priv);
1620 1584
1621 unlock_tx_qs(priv);
1622 local_irq_restore(flags);
1623
1624 netif_device_attach(ndev); 1585 netif_device_attach(ndev);
1625
1626 enable_napi(priv); 1586 enable_napi(priv);
1627 1587
1628 return 0; 1588 return 0;
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2045 /* Install our interrupt handlers for Error, 2005 /* Install our interrupt handlers for Error,
2046 * Transmit, and Receive 2006 * Transmit, and Receive
2047 */ 2007 */
2048 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2008 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009 IRQF_NO_SUSPEND,
2049 gfar_irq(grp, ER)->name, grp); 2010 gfar_irq(grp, ER)->name, grp);
2050 if (err < 0) { 2011 if (err < 0) {
2051 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2012 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2068 goto rx_irq_fail; 2029 goto rx_irq_fail;
2069 } 2030 }
2070 } else { 2031 } else {
2071 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2032 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033 IRQF_NO_SUSPEND,
2072 gfar_irq(grp, TX)->name, grp); 2034 gfar_irq(grp, TX)->name, grp);
2073 if (err < 0) { 2035 if (err < 0) {
2074 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2036 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2140,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
2140 /* Start Rx/Tx DMA and enable the interrupts */ 2102 /* Start Rx/Tx DMA and enable the interrupts */
2141 gfar_start(priv); 2103 gfar_start(priv);
2142 2104
2105 /* force link state update after mac reset */
2106 priv->oldlink = 0;
2107 priv->oldspeed = 0;
2108 priv->oldduplex = -1;
2109
2143 phy_start(priv->phydev); 2110 phy_start(priv->phydev);
2144 2111
2145 enable_napi(priv); 2112 enable_napi(priv);
@@ -2169,8 +2136,6 @@ static int gfar_enet_open(struct net_device *dev)
2169 if (err) 2136 if (err)
2170 return err; 2137 return err;
2171 2138
2172 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2173
2174 return err; 2139 return err;
2175} 2140}
2176 2141
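In the gianfar suspend/resume rework above, gfar_suspend() detaches the device under netif_tx_lock(), halts the controller, and, when magic-packet wake-up is armed, leaves only IMASK_MAG unmasked, sets MACCFG2_MPEN and keeps the Rx block enabled; gfar_resume() clears MPEN or restarts the PHY, and the error/Tx IRQs are requested with IRQF_NO_SUSPEND so they stay live across system sleep. A condensed sketch of the arming sequence, assuming the driver's private gianfar.h (register map plus the gfar_read()/gfar_write() helpers) is in scope:

#include "gianfar.h"	/* driver-private register map and accessors */

/* Sketch of the magic-packet arming done in gfar_suspend() above. */
static void my_gfar_arm_magic_packet(struct gfar __iomem *regs)
{
	u32 tempval;

	/* Only the Magic Packet event should wake the system. */
	gfar_write(&regs->imask, IMASK_MAG);

	/* Turn on Magic Packet detection in the MAC. */
	tempval = gfar_read(&regs->maccfg2);
	tempval |= MACCFG2_MPEN;
	gfar_write(&regs->maccfg2, tempval);

	/* Rx must keep running so the wake-up frame can be received. */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
}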
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
1145 int oldduplex; 1145 int oldduplex;
1146 int oldlink; 1146 int oldlink;
1147 1147
1148 /* Bitfield update lock */
1149 spinlock_t bflock;
1150
1151 uint32_t msg_enable; 1148 uint32_t msg_enable;
1152 1149
1153 struct work_struct reset_task; 1150 struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
654{ 654{
655 struct gfar_private *priv = netdev_priv(dev); 655 struct gfar_private *priv = netdev_priv(dev);
656 unsigned long flags;
657 656
658 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 657 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
659 wol->wolopts != 0) 658 wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664 663
665 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 664 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
666 665
667 spin_lock_irqsave(&priv->bflock, flags); 666 priv->wol_en = !!device_may_wakeup(&dev->dev);
668 priv->wol_en = !!device_may_wakeup(&dev->dev);
669 spin_unlock_irqrestore(&priv->bflock, flags);
670 667
671 return 0; 668 return 0;
672} 669}
@@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
903 return 0; 900 return 0;
904} 901}
905 902
906static int gfar_comp_asc(const void *a, const void *b)
907{
908 return memcmp(a, b, 4);
909}
910
911static int gfar_comp_desc(const void *a, const void *b)
912{
913 return -memcmp(a, b, 4);
914}
915
916static void gfar_swap(void *a, void *b, int size)
917{
918 u32 *_a = a;
919 u32 *_b = b;
920
921 swap(_a[0], _b[0]);
922 swap(_a[1], _b[1]);
923 swap(_a[2], _b[2]);
924 swap(_a[3], _b[3]);
925}
926
927/* Write a mask to filer cache */ 903/* Write a mask to filer cache */
928static void gfar_set_mask(u32 mask, struct filer_table *tab) 904static void gfar_set_mask(u32 mask, struct filer_table *tab)
929{ 905{
@@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1273 return 0; 1249 return 0;
1274} 1250}
1275 1251
1276/* Copy size filer entries */
1277static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1278 struct gfar_filer_entry src[0], s32 size)
1279{
1280 while (size > 0) {
1281 size--;
1282 dst[size].ctrl = src[size].ctrl;
1283 dst[size].prop = src[size].prop;
1284 }
1285}
1286
1287/* Delete the contents of the filer-table between start and end
1288 * and collapse them
1289 */
1290static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1291{
1292 int length;
1293
1294 if (end > MAX_FILER_CACHE_IDX || end < begin)
1295 return -EINVAL;
1296
1297 end++;
1298 length = end - begin;
1299
1300 /* Copy */
1301 while (end < tab->index) {
1302 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1303 tab->fe[begin++].prop = tab->fe[end++].prop;
1304
1305 }
1306 /* Fill up with don't cares */
1307 while (begin < tab->index) {
1308 tab->fe[begin].ctrl = 0x60;
1309 tab->fe[begin].prop = 0xFFFFFFFF;
1310 begin++;
1311 }
1312
1313 tab->index -= length;
1314 return 0;
1315}
1316
1317/* Make space on the wanted location */
1318static int gfar_expand_filer_entries(u32 begin, u32 length,
1319 struct filer_table *tab)
1320{
1321 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1322 begin > MAX_FILER_CACHE_IDX)
1323 return -EINVAL;
1324
1325 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1326 tab->index - length + 1);
1327
1328 tab->index += length;
1329 return 0;
1330}
1331
1332static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1333{
1334 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1335 start++) {
1336 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1337 (RQFCR_AND | RQFCR_CLE))
1338 return start;
1339 }
1340 return -1;
1341}
1342
1343static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1344{
1345 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1346 start++) {
1347 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1348 (RQFCR_CLE))
1349 return start;
1350 }
1351 return -1;
1352}
1353
1354/* Uses hardwares clustering option to reduce
1355 * the number of filer table entries
1356 */
1357static void gfar_cluster_filer(struct filer_table *tab)
1358{
1359 s32 i = -1, j, iend, jend;
1360
1361 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1362 j = i;
1363 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1364 /* The cluster entries self and the previous one
1365 * (a mask) must be identical!
1366 */
1367 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1368 break;
1369 if (tab->fe[i].prop != tab->fe[j].prop)
1370 break;
1371 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1372 break;
1373 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1374 break;
1375 iend = gfar_get_next_cluster_end(i, tab);
1376 jend = gfar_get_next_cluster_end(j, tab);
1377 if (jend == -1 || iend == -1)
1378 break;
1379
1380 /* First we make some free space, where our cluster
1381 * element should be. Then we copy it there and finally
1382 * delete in from its old location.
1383 */
1384 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1385 -EINVAL)
1386 break;
1387
1388 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1389 &(tab->fe[jend + 1]), jend - j);
1390
1391 if (gfar_trim_filer_entries(jend - 1,
1392 jend + (jend - j),
1393 tab) == -EINVAL)
1394 return;
1395
1396 /* Mask out cluster bit */
1397 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1398 }
1399 }
1400}
1401
1402/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1403static void gfar_swap_bits(struct gfar_filer_entry *a1,
1404 struct gfar_filer_entry *a2,
1405 struct gfar_filer_entry *b1,
1406 struct gfar_filer_entry *b2, u32 mask)
1407{
1408 u32 temp[4];
1409 temp[0] = a1->ctrl & mask;
1410 temp[1] = a2->ctrl & mask;
1411 temp[2] = b1->ctrl & mask;
1412 temp[3] = b2->ctrl & mask;
1413
1414 a1->ctrl &= ~mask;
1415 a2->ctrl &= ~mask;
1416 b1->ctrl &= ~mask;
1417 b2->ctrl &= ~mask;
1418
1419 a1->ctrl |= temp[1];
1420 a2->ctrl |= temp[0];
1421 b1->ctrl |= temp[3];
1422 b2->ctrl |= temp[2];
1423}
1424
1425/* Generate a list consisting of masks values with their start and
1426 * end of validity and block as indicator for parts belonging
1427 * together (glued by ANDs) in mask_table
1428 */
1429static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1430 struct filer_table *tab)
1431{
1432 u32 i, and_index = 0, block_index = 1;
1433
1434 for (i = 0; i < tab->index; i++) {
1435
1436 /* LSByte of control = 0 sets a mask */
1437 if (!(tab->fe[i].ctrl & 0xF)) {
1438 mask_table[and_index].mask = tab->fe[i].prop;
1439 mask_table[and_index].start = i;
1440 mask_table[and_index].block = block_index;
1441 if (and_index >= 1)
1442 mask_table[and_index - 1].end = i - 1;
1443 and_index++;
1444 }
1445 /* cluster starts and ends will be separated because they should
1446 * hold their position
1447 */
1448 if (tab->fe[i].ctrl & RQFCR_CLE)
1449 block_index++;
1450 /* A not set AND indicates the end of a depended block */
1451 if (!(tab->fe[i].ctrl & RQFCR_AND))
1452 block_index++;
1453 }
1454
1455 mask_table[and_index - 1].end = i - 1;
1456
1457 return and_index;
1458}
1459
1460/* Sorts the entries of mask_table by the values of the masks.
1461 * Important: The 0xFF80 flags of the first and last entry of a
1462 * block must hold their position (which queue, CLusterEnable, ReJEct,
1463 * AND)
1464 */
1465static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1466 struct filer_table *temp_table, u32 and_index)
1467{
1468 /* Pointer to compare function (_asc or _desc) */
1469 int (*gfar_comp)(const void *, const void *);
1470
1471 u32 i, size = 0, start = 0, prev = 1;
1472 u32 old_first, old_last, new_first, new_last;
1473
1474 gfar_comp = &gfar_comp_desc;
1475
1476 for (i = 0; i < and_index; i++) {
1477 if (prev != mask_table[i].block) {
1478 old_first = mask_table[start].start + 1;
1479 old_last = mask_table[i - 1].end;
1480 sort(mask_table + start, size,
1481 sizeof(struct gfar_mask_entry),
1482 gfar_comp, &gfar_swap);
1483
1484 /* Toggle order for every block. This makes the
1485 * thing more efficient!
1486 */
1487 if (gfar_comp == gfar_comp_desc)
1488 gfar_comp = &gfar_comp_asc;
1489 else
1490 gfar_comp = &gfar_comp_desc;
1491
1492 new_first = mask_table[start].start + 1;
1493 new_last = mask_table[i - 1].end;
1494
1495 gfar_swap_bits(&temp_table->fe[new_first],
1496 &temp_table->fe[old_first],
1497 &temp_table->fe[new_last],
1498 &temp_table->fe[old_last],
1499 RQFCR_QUEUE | RQFCR_CLE |
1500 RQFCR_RJE | RQFCR_AND);
1501
1502 start = i;
1503 size = 0;
1504 }
1505 size++;
1506 prev = mask_table[i].block;
1507 }
1508}
1509
1510/* Reduces the number of masks needed in the filer table to save entries
1511 * This is done by sorting the masks of a depended block. A depended block is
1512 * identified by gluing ANDs or CLE. The sorting order toggles after every
1513 * block. Of course entries in scope of a mask must change their location with
1514 * it.
1515 */
1516static int gfar_optimize_filer_masks(struct filer_table *tab)
1517{
1518 struct filer_table *temp_table;
1519 struct gfar_mask_entry *mask_table;
1520
1521 u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1522 s32 ret = 0;
1523
1524 /* We need a copy of the filer table because
1525 * we want to change its order
1526 */
1527 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1528 if (temp_table == NULL)
1529 return -ENOMEM;
1530
1531 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1532 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1533
1534 if (mask_table == NULL) {
1535 ret = -ENOMEM;
1536 goto end;
1537 }
1538
1539 and_index = gfar_generate_mask_table(mask_table, tab);
1540
1541 gfar_sort_mask_table(mask_table, temp_table, and_index);
1542
1543 /* Now we can copy the data from our duplicated filer table to
1544 * the real one in the order the mask table says
1545 */
1546 for (i = 0; i < and_index; i++) {
1547 size = mask_table[i].end - mask_table[i].start + 1;
1548 gfar_copy_filer_entries(&(tab->fe[j]),
1549 &(temp_table->fe[mask_table[i].start]), size);
1550 j += size;
1551 }
1552
1553 /* And finally we just have to check for duplicated masks and drop the
1554 * second ones
1555 */
1556 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1557 if (tab->fe[i].ctrl == 0x80) {
1558 previous_mask = i++;
1559 break;
1560 }
1561 }
1562 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1563 if (tab->fe[i].ctrl == 0x80) {
1564 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1565 /* Two identical ones found!
1566 * So drop the second one!
1567 */
1568 gfar_trim_filer_entries(i, i, tab);
1569 } else
1570 /* Not identical! */
1571 previous_mask = i;
1572 }
1573 }
1574
1575 kfree(mask_table);
1576end: kfree(temp_table);
1577 return ret;
1578}
1579
1580/* Write the bit-pattern from software's buffer to hardware registers */ 1252/* Write the bit-pattern from software's buffer to hardware registers */
1581static int gfar_write_filer_table(struct gfar_private *priv, 1253static int gfar_write_filer_table(struct gfar_private *priv,
1582 struct filer_table *tab) 1254 struct filer_table *tab)
@@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1258 return -EBUSY;
1587 1259
1588 /* Fill regular entries */ 1260 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); 1261 for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1262 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1263 /* Fill the rest with fall-troughs */
1593 for (; i < MAX_FILER_IDX - 1; i++) 1264 for (; i < MAX_FILER_IDX; i++)
1594 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1265 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1595 /* Last entry must be default accept 1266 /* Last entry must be default accept
1596 * because that's what people expect 1267 * because that's what people expect
@@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1624{ 1295{
1625 struct ethtool_flow_spec_container *j; 1296 struct ethtool_flow_spec_container *j;
1626 struct filer_table *tab; 1297 struct filer_table *tab;
1627 s32 i = 0;
1628 s32 ret = 0; 1298 s32 ret = 0;
1629 1299
1630 /* So index is set to zero, too! */ 1300 /* So index is set to zero, too! */
@@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1649 } 1319 }
1650 } 1320 }
1651 1321
1652 i = tab->index;
1653
1654 /* Optimizations to save entries */
1655 gfar_cluster_filer(tab);
1656 gfar_optimize_filer_masks(tab);
1657
1658 pr_debug("\tSummary:\n"
1659 "\tData on hardware: %d\n"
1660 "\tCompression rate: %d%%\n",
1661 tab->index, 100 - (100 * tab->index) / i);
1662
1663 /* Write everything to hardware */ 1322 /* Write everything to hardware */
1664 ret = gfar_write_filer_table(priv, tab); 1323 ret = gfar_write_filer_table(priv, tab);
1665 if (ret == -EBUSY) { 1324 if (ret == -EBUSY) {
@@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
1725 } 1384 }
1726 1385
1727process: 1386process:
1387 priv->rx_list.count++;
1728 ret = gfar_process_filer_changes(priv); 1388 ret = gfar_process_filer_changes(priv);
1729 if (ret) 1389 if (ret)
1730 goto clean_list; 1390 goto clean_list;
1731 priv->rx_list.count++;
1732 return ret; 1391 return ret;
1733 1392
1734clean_list: 1393clean_list:
1394 priv->rx_list.count--;
1735 list_del(&temp->list); 1395 list_del(&temp->list);
1736clean_mem: 1396clean_mem:
1737 kfree(temp); 1397 kfree(temp);
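gfar_add_cls() above now bumps rx_list.count before gfar_process_filer_changes() and decrements it again on failure, so the cached count always matches the rule list the table writer walks; the clustering and mask-optimization passes (and their compression-rate debug print) are dropped, and the filer write loop fills all MAX_FILER_IDX entries. A small sketch of that count-first, roll-back-on-error bookkeeping, using made-up types and a hypothetical commit helper:

#include <linux/list.h>

struct my_rule  { struct list_head list; };
struct my_table { struct list_head rules; int count; };

static int my_commit_to_hw(struct my_table *tab);	/* hypothetical */

static int my_add_rule(struct my_table *tab, struct my_rule *rule)
{
	int ret;

	list_add_tail(&rule->list, &tab->rules);
	tab->count++;			/* count matches the list before committing */

	ret = my_commit_to_hw(tab);
	if (ret) {
		tab->count--;		/* roll the bookkeeping back on failure */
		list_del(&rule->list);
	}
	return ret;
}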
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcdc795b..b5b2925103ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
216 216
217static inline bool fm10k_page_is_reserved(struct page *page) 217static inline bool fm10k_page_is_reserved(struct page *page)
218{ 218{
219 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
220} 220}
221 221
222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, 222static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b152bd..830466c49987 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6566 6566
6567static inline bool igb_page_is_reserved(struct page *page) 6567static inline bool igb_page_is_reserved(struct page *page)
6568{ 6568{
6569 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 6569 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
6570} 6570}
6571 6571
6572static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, 6572static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104e34ea..ae21e0b06c3a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
1832 1832
1833static inline bool ixgbe_page_is_reserved(struct page *page) 1833static inline bool ixgbe_page_is_reserved(struct page *page)
1834{ 1834{
1835 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 1835 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
1836} 1836}
1837 1837
1838/** 1838/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde9cb01..1d7b00b038a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
765 765
766static inline bool ixgbevf_page_is_reserved(struct page *page) 766static inline bool ixgbevf_page_is_reserved(struct page *page)
767{ 767{
768 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; 768 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
769} 769}
770 770
771/** 771/**
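The four Intel Rx paths above stop reading page->pfmemalloc directly and use the page_is_pfmemalloc() accessor instead, so the reuse test no longer depends on how that flag is encoded inside struct page; the condition itself is unchanged (no recycling of remote-node pages or pages taken from the emergency reserves). A minimal sketch of the predicate, under an illustrative name:

#include <linux/mm.h>
#include <linux/topology.h>

/* Sketch of the reuse predicate shared by the fm10k/igb/ixgbe/ixgbevf hunks. */
static inline bool my_page_is_reserved(struct page *page)
{
	/* Remote-node pages and pfmemalloc (emergency reserve) pages
	 * must not be recycled back into the Rx ring.
	 */
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}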
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1462 struct mvneta_rx_queue *rxq) 1462 struct mvneta_rx_queue *rxq)
1463{ 1463{
1464 struct net_device *dev = pp->dev; 1464 struct net_device *dev = pp->dev;
1465 int rx_done, rx_filled; 1465 int rx_done;
1466 u32 rcvd_pkts = 0; 1466 u32 rcvd_pkts = 0;
1467 u32 rcvd_bytes = 0; 1467 u32 rcvd_bytes = 0;
1468 1468
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1473 rx_todo = rx_done; 1473 rx_todo = rx_done;
1474 1474
1475 rx_done = 0; 1475 rx_done = 0;
1476 rx_filled = 0;
1477 1476
1478 /* Fairness NAPI loop */ 1477 /* Fairness NAPI loop */
1479 while (rx_done < rx_todo) { 1478 while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1484 int rx_bytes, err; 1483 int rx_bytes, err;
1485 1484
1486 rx_done++; 1485 rx_done++;
1487 rx_filled++;
1488 rx_status = rx_desc->status; 1486 rx_status = rx_desc->status;
1489 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1490 data = (unsigned char *)rx_desc->buf_cookie; 1488 data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1524 continue; 1522 continue;
1525 } 1523 }
1526 1524
1525 /* Refill processing */
1526 err = mvneta_rx_refill(pp, rx_desc);
1527 if (err) {
1528 netdev_err(dev, "Linux processing - Can't refill\n");
1529 rxq->missed++;
1530 goto err_drop_frame;
1531 }
1532
1527 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1533 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1528 if (!skb) 1534 if (!skb)
1529 goto err_drop_frame; 1535 goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1543 mvneta_rx_csum(pp, rx_status, skb); 1549 mvneta_rx_csum(pp, rx_status, skb);
1544 1550
1545 napi_gro_receive(&pp->napi, skb); 1551 napi_gro_receive(&pp->napi, skb);
1546
1547 /* Refill processing */
1548 err = mvneta_rx_refill(pp, rx_desc);
1549 if (err) {
1550 netdev_err(dev, "Linux processing - Can't refill\n");
1551 rxq->missed++;
1552 rx_filled--;
1553 }
1554 } 1552 }
1555 1553
1556 if (rcvd_pkts) { 1554 if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1563 } 1561 }
1564 1562
1565 /* Update rxq management counters */ 1563 /* Update rxq management counters */
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1564 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1567 1565
1568 return rx_done; 1566 return rx_done;
1569} 1567}
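mvneta_rx() above moves the refill in front of build_skb(): if no replacement buffer can be allocated the frame is dropped and the descriptor keeps its current buffer, instead of the old flow where a refill failure after napi_gro_receive() silently shrank the ring (which the removed rx_filled counter tried to track). A sketch of the reordered per-descriptor step, with simplified, hypothetical struct and helper names:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

struct my_rx_desc;			/* hypothetical descriptor type */

struct my_port {
	struct napi_struct napi;
	unsigned int frag_size;
};

static int my_rx_refill(struct my_port *pp, struct my_rx_desc *desc);	/* hypothetical */

static int my_rx_one(struct my_port *pp, struct my_rx_desc *rx_desc,
		     unsigned char *data, int rx_bytes)
{
	struct sk_buff *skb;

	/* Refill first: if no replacement buffer is available, drop the
	 * frame and keep the old buffer attached to the descriptor.
	 */
	if (my_rx_refill(pp, rx_desc))
		return -ENOMEM;		/* caller counts this as a dropped frame */

	skb = build_skb(data, pp->frag_size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, rx_bytes);
	napi_gro_receive(&pp->napi, skb);
	return 0;
}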
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
27#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/hrtimer.h>
31#include <linux/ktime.h>
30#include <uapi/linux/ppp_defs.h> 32#include <uapi/linux/ppp_defs.h>
31#include <net/ip.h> 33#include <net/ip.h>
32#include <net/ipv6.h> 34#include <net/ipv6.h>
@@ -299,6 +301,7 @@
299 301
300/* Coalescing */ 302/* Coalescing */
301#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 303#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
304#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
302#define MVPP2_RX_COAL_PKTS 32 305#define MVPP2_RX_COAL_PKTS 32
303#define MVPP2_RX_COAL_USEC 100 306#define MVPP2_RX_COAL_USEC 100
304 307
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
660 u64 tx_bytes; 663 u64 tx_bytes;
661}; 664};
662 665
666/* Per-CPU port control */
667struct mvpp2_port_pcpu {
668 struct hrtimer tx_done_timer;
669 bool timer_scheduled;
670 /* Tasklet for egress finalization */
671 struct tasklet_struct tx_done_tasklet;
672};
673
663struct mvpp2_port { 674struct mvpp2_port {
664 u8 id; 675 u8 id;
665 676
@@ -679,6 +690,9 @@ struct mvpp2_port {
679 u32 pending_cause_rx; 690 u32 pending_cause_rx;
680 struct napi_struct napi; 691 struct napi_struct napi;
681 692
693 /* Per-CPU port control */
694 struct mvpp2_port_pcpu __percpu *pcpu;
695
682 /* Flags */ 696 /* Flags */
683 unsigned long flags; 697 unsigned long flags;
684 698
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
776 /* Array of transmitted skb */ 790 /* Array of transmitted skb */
777 struct sk_buff **tx_skb; 791 struct sk_buff **tx_skb;
778 792
793 /* Array of transmitted buffers' physical addresses */
794 dma_addr_t *tx_buffs;
795
779 /* Index of last TX DMA descriptor that was inserted */ 796 /* Index of last TX DMA descriptor that was inserted */
780 int txq_put_index; 797 int txq_put_index;
781 798
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
913 /* Occupied buffers indicator */ 930 /* Occupied buffers indicator */
914 atomic_t in_use; 931 atomic_t in_use;
915 int in_use_thresh; 932 int in_use_thresh;
916
917 spinlock_t lock;
918}; 933};
919 934
920struct mvpp2_buff_hdr { 935struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
963} 978}
964 979
965static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, 980static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
966 struct sk_buff *skb) 981 struct sk_buff *skb,
982 struct mvpp2_tx_desc *tx_desc)
967{ 983{
968 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; 984 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
985 if (skb)
986 txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
987 tx_desc->buf_phys_addr;
969 txq_pcpu->txq_put_index++; 988 txq_pcpu->txq_put_index++;
970 if (txq_pcpu->txq_put_index == txq_pcpu->size) 989 if (txq_pcpu->txq_put_index == txq_pcpu->size)
971 txq_pcpu->txq_put_index = 0; 990 txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
3376 bm_pool->pkt_size = 0; 3395 bm_pool->pkt_size = 0;
3377 bm_pool->buf_num = 0; 3396 bm_pool->buf_num = 0;
3378 atomic_set(&bm_pool->in_use, 0); 3397 atomic_set(&bm_pool->in_use, 0);
3379 spin_lock_init(&bm_pool->lock);
3380 3398
3381 return 0; 3399 return 0;
3382} 3400}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
3647mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 3665mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3648 int pkt_size) 3666 int pkt_size)
3649{ 3667{
3650 unsigned long flags = 0;
3651 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 3668 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3652 int num; 3669 int num;
3653 3670
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3656 return NULL; 3673 return NULL;
3657 } 3674 }
3658 3675
3659 spin_lock_irqsave(&new_pool->lock, flags);
3660
3661 if (new_pool->type == MVPP2_BM_FREE) 3676 if (new_pool->type == MVPP2_BM_FREE)
3662 new_pool->type = type; 3677 new_pool->type = type;
3663 3678
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3686 if (num != pkts_num) { 3701 if (num != pkts_num) {
3687 WARN(1, "pool %d: %d of %d allocated\n", 3702 WARN(1, "pool %d: %d of %d allocated\n",
3688 new_pool->id, num, pkts_num); 3703 new_pool->id, num, pkts_num);
3689 /* We need to undo the bufs_add() allocations */
3690 spin_unlock_irqrestore(&new_pool->lock, flags);
3691 return NULL; 3704 return NULL;
3692 } 3705 }
3693 } 3706 }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3695 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 3708 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3696 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 3709 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3697 3710
3698 spin_unlock_irqrestore(&new_pool->lock, flags);
3699
3700 return new_pool; 3711 return new_pool;
3701} 3712}
3702 3713
3703/* Initialize pools for swf */ 3714/* Initialize pools for swf */
3704static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 3715static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3705{ 3716{
3706 unsigned long flags = 0;
3707 int rxq; 3717 int rxq;
3708 3718
3709 if (!port->pool_long) { 3719 if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3714 if (!port->pool_long) 3724 if (!port->pool_long)
3715 return -ENOMEM; 3725 return -ENOMEM;
3716 3726
3717 spin_lock_irqsave(&port->pool_long->lock, flags);
3718 port->pool_long->port_map |= (1 << port->id); 3727 port->pool_long->port_map |= (1 << port->id);
3719 spin_unlock_irqrestore(&port->pool_long->lock, flags);
3720 3728
3721 for (rxq = 0; rxq < rxq_number; rxq++) 3729 for (rxq = 0; rxq < rxq_number; rxq++)
3722 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 3730 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3730 if (!port->pool_short) 3738 if (!port->pool_short)
3731 return -ENOMEM; 3739 return -ENOMEM;
3732 3740
3733 spin_lock_irqsave(&port->pool_short->lock, flags);
3734 port->pool_short->port_map |= (1 << port->id); 3741 port->pool_short->port_map |= (1 << port->id);
3735 spin_unlock_irqrestore(&port->pool_short->lock, flags);
3736 3742
3737 for (rxq = 0; rxq < rxq_number; rxq++) 3743 for (rxq = 0; rxq < rxq_number; rxq++)
3738 mvpp2_rxq_short_pool_set(port, rxq, 3744 mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
3806 3812
3807 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 3813 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3808 (MVPP2_CAUSE_MISC_SUM_MASK | 3814 (MVPP2_CAUSE_MISC_SUM_MASK |
3809 MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
3810 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); 3815 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3811} 3816}
3812 3817
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4382 rxq->time_coal = usec; 4387 rxq->time_coal = usec;
4383} 4388}
4384 4389
4385/* Set threshold for TX_DONE pkts coalescing */
4386static void mvpp2_tx_done_pkts_coal_set(void *arg)
4387{
4388 struct mvpp2_port *port = arg;
4389 int queue;
4390 u32 val;
4391
4392 for (queue = 0; queue < txq_number; queue++) {
4393 struct mvpp2_tx_queue *txq = port->txqs[queue];
4394
4395 val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
4396 MVPP2_TRANSMITTED_THRESH_MASK;
4397 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4398 mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
4399 }
4400}
4401
4402/* Free Tx queue skbuffs */ 4390/* Free Tx queue skbuffs */
4403static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4391static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4404 struct mvpp2_tx_queue *txq, 4392 struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4407 int i; 4395 int i;
4408 4396
4409 for (i = 0; i < num; i++) { 4397 for (i = 0; i < num; i++) {
4410 struct mvpp2_tx_desc *tx_desc = txq->descs + 4398 dma_addr_t buf_phys_addr =
4411 txq_pcpu->txq_get_index; 4399 txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
4412 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; 4400 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4413 4401
4414 mvpp2_txq_inc_get(txq_pcpu); 4402 mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4416 if (!skb) 4404 if (!skb)
4417 continue; 4405 continue;
4418 4406
4419 dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, 4407 dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4420 tx_desc->data_size, DMA_TO_DEVICE); 4408 skb_headlen(skb), DMA_TO_DEVICE);
4421 dev_kfree_skb_any(skb); 4409 dev_kfree_skb_any(skb);
4422 } 4410 }
4423} 4411}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4433static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4421static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4434 u32 cause) 4422 u32 cause)
4435{ 4423{
4436 int queue = fls(cause >> 16) - 1; 4424 int queue = fls(cause) - 1;
4437 4425
4438 return port->txqs[queue]; 4426 return port->txqs[queue];
4439} 4427}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4460 netif_tx_wake_queue(nq); 4448 netif_tx_wake_queue(nq);
4461} 4449}
4462 4450
4451static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4452{
4453 struct mvpp2_tx_queue *txq;
4454 struct mvpp2_txq_pcpu *txq_pcpu;
4455 unsigned int tx_todo = 0;
4456
4457 while (cause) {
4458 txq = mvpp2_get_tx_queue(port, cause);
4459 if (!txq)
4460 break;
4461
4462 txq_pcpu = this_cpu_ptr(txq->pcpu);
4463
4464 if (txq_pcpu->count) {
4465 mvpp2_txq_done(port, txq, txq_pcpu);
4466 tx_todo += txq_pcpu->count;
4467 }
4468
4469 cause &= ~(1 << txq->log_id);
4470 }
4471 return tx_todo;
4472}
4473
4463/* Rx/Tx queue initialization/cleanup methods */ 4474/* Rx/Tx queue initialization/cleanup methods */
4464 4475
4465/* Allocate and initialize descriptors for aggr TXQ */ 4476/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4649 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * 4660 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4650 sizeof(*txq_pcpu->tx_skb), 4661 sizeof(*txq_pcpu->tx_skb),
4651 GFP_KERNEL); 4662 GFP_KERNEL);
4652 if (!txq_pcpu->tx_skb) { 4663 if (!txq_pcpu->tx_skb)
4653 dma_free_coherent(port->dev->dev.parent, 4664 goto error;
4654 txq->size * MVPP2_DESC_ALIGNED_SIZE, 4665
4655 txq->descs, txq->descs_phys); 4666 txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4656 return -ENOMEM; 4667 sizeof(dma_addr_t), GFP_KERNEL);
4657 } 4668 if (!txq_pcpu->tx_buffs)
4669 goto error;
4658 4670
4659 txq_pcpu->count = 0; 4671 txq_pcpu->count = 0;
4660 txq_pcpu->reserved_num = 0; 4672 txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4663 } 4675 }
4664 4676
4665 return 0; 4677 return 0;
4678
4679error:
4680 for_each_present_cpu(cpu) {
4681 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4682 kfree(txq_pcpu->tx_skb);
4683 kfree(txq_pcpu->tx_buffs);
4684 }
4685
4686 dma_free_coherent(port->dev->dev.parent,
4687 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4688 txq->descs, txq->descs_phys);
4689
4690 return -ENOMEM;
4666} 4691}
4667 4692
4668/* Free allocated TXQ resources */ 4693/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
4675 for_each_present_cpu(cpu) { 4700 for_each_present_cpu(cpu) {
4676 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4701 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4677 kfree(txq_pcpu->tx_skb); 4702 kfree(txq_pcpu->tx_skb);
4703 kfree(txq_pcpu->tx_buffs);
4678 } 4704 }
4679 4705
4680 if (txq->descs) 4706 if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
4805 goto err_cleanup; 4831 goto err_cleanup;
4806 } 4832 }
4807 4833
4808 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
4809 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 4834 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4810 return 0; 4835 return 0;
4811 4836
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
4887 } 4912 }
4888} 4913}
4889 4914
4915static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4916{
4917 ktime_t interval;
4918
4919 if (!port_pcpu->timer_scheduled) {
4920 port_pcpu->timer_scheduled = true;
4921 interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
4922 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4923 HRTIMER_MODE_REL_PINNED);
4924 }
4925}
4926
4927static void mvpp2_tx_proc_cb(unsigned long data)
4928{
4929 struct net_device *dev = (struct net_device *)data;
4930 struct mvpp2_port *port = netdev_priv(dev);
4931 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4932 unsigned int tx_todo, cause;
4933
4934 if (!netif_running(dev))
4935 return;
4936 port_pcpu->timer_scheduled = false;
4937
4938 /* Process all the Tx queues */
4939 cause = (1 << txq_number) - 1;
4940 tx_todo = mvpp2_tx_done(port, cause);
4941
4942 /* Set the timer in case not all the packets were processed */
4943 if (tx_todo)
4944 mvpp2_timer_set(port_pcpu);
4945}
4946
4947static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4948{
4949 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4950 struct mvpp2_port_pcpu,
4951 tx_done_timer);
4952
4953 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4954
4955 return HRTIMER_NORESTART;
4956}
4957
4890/* Main RX/TX processing routines */ 4958/* Main RX/TX processing routines */
4891 4959
4892/* Display more error info */ 4960/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5144 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 5212 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5145 /* Last descriptor */ 5213 /* Last descriptor */
5146 tx_desc->command = MVPP2_TXD_L_DESC; 5214 tx_desc->command = MVPP2_TXD_L_DESC;
5147 mvpp2_txq_inc_put(txq_pcpu, skb); 5215 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5148 } else { 5216 } else {
5149 /* Descriptor in the middle: Not First, Not Last */ 5217 /* Descriptor in the middle: Not First, Not Last */
5150 tx_desc->command = 0; 5218 tx_desc->command = 0;
5151 mvpp2_txq_inc_put(txq_pcpu, NULL); 5219 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5152 } 5220 }
5153 } 5221 }
5154 5222
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5214 /* First and Last descriptor */ 5282 /* First and Last descriptor */
5215 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 5283 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5216 tx_desc->command = tx_cmd; 5284 tx_desc->command = tx_cmd;
5217 mvpp2_txq_inc_put(txq_pcpu, skb); 5285 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5218 } else { 5286 } else {
5219 /* First but not Last */ 5287 /* First but not Last */
5220 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 5288 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5221 tx_desc->command = tx_cmd; 5289 tx_desc->command = tx_cmd;
5222 mvpp2_txq_inc_put(txq_pcpu, NULL); 5290 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5223 5291
5224 /* Continue with other skb fragments */ 5292 /* Continue with other skb fragments */
5225 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 5293 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
5255 dev_kfree_skb_any(skb); 5323 dev_kfree_skb_any(skb);
5256 } 5324 }
5257 5325
5326 /* Finalize TX processing */
5327 if (txq_pcpu->count >= txq->done_pkts_coal)
5328 mvpp2_txq_done(port, txq, txq_pcpu);
5329
5330 /* Set the timer in case not all frags were processed */
5331 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5332 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5333
5334 mvpp2_timer_set(port_pcpu);
5335 }
5336
5258 return NETDEV_TX_OK; 5337 return NETDEV_TX_OK;
5259} 5338}
5260 5339
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5268 netdev_err(dev, "tx fifo underrun error\n"); 5347 netdev_err(dev, "tx fifo underrun error\n");
5269} 5348}
5270 5349
5271static void mvpp2_txq_done_percpu(void *arg) 5350static int mvpp2_poll(struct napi_struct *napi, int budget)
5272{ 5351{
5273 struct mvpp2_port *port = arg; 5352 u32 cause_rx_tx, cause_rx, cause_misc;
5274 u32 cause_rx_tx, cause_tx, cause_misc; 5353 int rx_done = 0;
5354 struct mvpp2_port *port = netdev_priv(napi->dev);
5275 5355
5276 /* Rx/Tx cause register 5356 /* Rx/Tx cause register
5277 * 5357 *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
5285 */ 5365 */
5286 cause_rx_tx = mvpp2_read(port->priv, 5366 cause_rx_tx = mvpp2_read(port->priv,
5287 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5367 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5288 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5368 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5289 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5369 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5290 5370
5291 if (cause_misc) { 5371 if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
5297 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 5377 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5298 } 5378 }
5299 5379
5300 /* Release TX descriptors */
5301 if (cause_tx) {
5302 struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
5303 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5304
5305 if (txq_pcpu->count)
5306 mvpp2_txq_done(port, txq, txq_pcpu);
5307 }
5308}
5309
5310static int mvpp2_poll(struct napi_struct *napi, int budget)
5311{
5312 u32 cause_rx_tx, cause_rx;
5313 int rx_done = 0;
5314 struct mvpp2_port *port = netdev_priv(napi->dev);
5315
5316 on_each_cpu(mvpp2_txq_done_percpu, port, 1);
5317
5318 cause_rx_tx = mvpp2_read(port->priv,
5319 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5320 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5380 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5321 5381
5322 /* Process RX packets */ 5382 /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
5561static int mvpp2_stop(struct net_device *dev) 5621static int mvpp2_stop(struct net_device *dev)
5562{ 5622{
5563 struct mvpp2_port *port = netdev_priv(dev); 5623 struct mvpp2_port *port = netdev_priv(dev);
5624 struct mvpp2_port_pcpu *port_pcpu;
5625 int cpu;
5564 5626
5565 mvpp2_stop_dev(port); 5627 mvpp2_stop_dev(port);
5566 mvpp2_phy_disconnect(port); 5628 mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
5569 on_each_cpu(mvpp2_interrupts_mask, port, 1); 5631 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5570 5632
5571 free_irq(port->irq, port); 5633 free_irq(port->irq, port);
5634 for_each_present_cpu(cpu) {
5635 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5636
5637 hrtimer_cancel(&port_pcpu->tx_done_timer);
5638 port_pcpu->timer_scheduled = false;
5639 tasklet_kill(&port_pcpu->tx_done_tasklet);
5640 }
5572 mvpp2_cleanup_rxqs(port); 5641 mvpp2_cleanup_rxqs(port);
5573 mvpp2_cleanup_txqs(port); 5642 mvpp2_cleanup_txqs(port);
5574 5643
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5784 txq->done_pkts_coal = c->tx_max_coalesced_frames; 5853 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5785 } 5854 }
5786 5855
5787 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
5788 return 0; 5856 return 0;
5789} 5857}
5790 5858
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6035{ 6103{
6036 struct device_node *phy_node; 6104 struct device_node *phy_node;
6037 struct mvpp2_port *port; 6105 struct mvpp2_port *port;
6106 struct mvpp2_port_pcpu *port_pcpu;
6038 struct net_device *dev; 6107 struct net_device *dev;
6039 struct resource *res; 6108 struct resource *res;
6040 const char *dt_mac_addr; 6109 const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6044 int features; 6113 int features;
6045 int phy_mode; 6114 int phy_mode;
6046 int priv_common_regs_num = 2; 6115 int priv_common_regs_num = 2;
6047 int err, i; 6116 int err, i, cpu;
6048 6117
6049 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, 6118 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6050 rxq_number); 6119 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6135 } 6204 }
6136 mvpp2_port_power_up(port); 6205 mvpp2_port_power_up(port);
6137 6206
6207 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6208 if (!port->pcpu) {
6209 err = -ENOMEM;
6210 goto err_free_txq_pcpu;
6211 }
6212
6213 for_each_present_cpu(cpu) {
6214 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6215
6216 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6217 HRTIMER_MODE_REL_PINNED);
6218 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6219 port_pcpu->timer_scheduled = false;
6220
6221 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6222 (unsigned long)dev);
6223 }
6224
6138 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); 6225 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6139 features = NETIF_F_SG | NETIF_F_IP_CSUM; 6226 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6140 dev->features = features | NETIF_F_RXCSUM; 6227 dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6144 err = register_netdev(dev); 6231 err = register_netdev(dev);
6145 if (err < 0) { 6232 if (err < 0) {
6146 dev_err(&pdev->dev, "failed to register netdev\n"); 6233 dev_err(&pdev->dev, "failed to register netdev\n");
6147 goto err_free_txq_pcpu; 6234 goto err_free_port_pcpu;
6148 } 6235 }
6149 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 6236 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6150 6237
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6153 priv->port_list[id] = port; 6240 priv->port_list[id] = port;
6154 return 0; 6241 return 0;
6155 6242
6243err_free_port_pcpu:
6244 free_percpu(port->pcpu);
6156err_free_txq_pcpu: 6245err_free_txq_pcpu:
6157 for (i = 0; i < txq_number; i++) 6246 for (i = 0; i < txq_number; i++)
6158 free_percpu(port->txqs[i]->pcpu); 6247 free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
6171 int i; 6260 int i;
6172 6261
6173 unregister_netdev(port->dev); 6262 unregister_netdev(port->dev);
6263 free_percpu(port->pcpu);
6174 free_percpu(port->stats); 6264 free_percpu(port->stats);
6175 for (i = 0; i < txq_number; i++) 6265 for (i = 0; i < txq_number; i++)
6176 free_percpu(port->txqs[i]->pcpu); 6266 free_percpu(port->txqs[i]->pcpu);
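The mvpp2 rework above stops using per-queue TX-done interrupts (the cause mask and the on_each_cpu() coalescing setup are removed) and instead reclaims completed descriptors from the transmit path once txq_pcpu->count reaches done_pkts_coal, falling back to a per-CPU pinned hrtimer that kicks a tasklet; mvpp2_stop() cancels the timer and kills the tasklet on every CPU. A reduced sketch of that timer/tasklet plumbing, with made-up names around the kernel APIs:

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/interrupt.h>

#define MY_TXDONE_PERIOD_NS	1000000UL	/* 1 ms, as in the hunks above */

struct my_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	struct tasklet_struct tx_done_tasklet;
};

static void my_tx_proc_cb(unsigned long data);	/* reclaims descriptors (hypothetical) */

static enum hrtimer_restart my_hr_timer_cb(struct hrtimer *timer)
{
	struct my_port_pcpu *pcpu = container_of(timer, struct my_port_pcpu,
						 tx_done_timer);

	/* Defer the actual descriptor reclaim to softirq context. */
	tasklet_schedule(&pcpu->tx_done_tasklet);
	return HRTIMER_NORESTART;
}

static void my_timer_set(struct my_port_pcpu *pcpu)
{
	if (!pcpu->timer_scheduled) {
		pcpu->timer_scheduled = true;
		hrtimer_start(&pcpu->tx_done_timer,
			      ktime_set(0, MY_TXDONE_PERIOD_NS),
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void my_port_pcpu_init(struct my_port_pcpu *pcpu, unsigned long dev_addr)
{
	hrtimer_init(&pcpu->tx_done_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	pcpu->tx_done_timer.function = my_hr_timer_cb;
	pcpu->timer_scheduled = false;
	tasklet_init(&pcpu->tx_done_tasklet, my_tx_proc_cb, dev_addr);
}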
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
686{ 686{
687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688 struct mlx4_cmd_context *context; 688 struct mlx4_cmd_context *context;
689 long ret_wait;
689 int err = 0; 690 int err = 0;
690 691
691 down(&cmd->event_sem); 692 down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
711 if (err) 712 if (err)
712 goto out_reset; 713 goto out_reset;
713 714
714 if (!wait_for_completion_timeout(&context->done, 715 if (op == MLX4_CMD_SENSE_PORT) {
715 msecs_to_jiffies(timeout))) { 716 ret_wait =
717 wait_for_completion_interruptible_timeout(&context->done,
718 msecs_to_jiffies(timeout));
719 if (ret_wait < 0) {
720 context->fw_status = 0;
721 context->out_param = 0;
722 context->result = 0;
723 }
724 } else {
725 ret_wait = (long)wait_for_completion_timeout(&context->done,
726 msecs_to_jiffies(timeout));
727 }
728 if (!ret_wait) {
716 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 729 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
717 op); 730 op);
718 if (op == MLX4_CMD_NOP) { 731 if (op == MLX4_CMD_NOP) {
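
The mlx4_cmd_wait() change makes MLX4_CMD_SENSE_PORT wait interruptibly, so a pending signal aborts the wait instead of blocking the caller for the full timeout, and the command context's fw_status/out_param/result are zeroed when that happens so stale values are not reused. Everything hinges on the completion API's return-value convention; a small sketch, with my_wait_for_cmd() as a hypothetical wrapper:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/*
 * wait_for_completion_interruptible_timeout() returns:
 *   < 0  -> interrupted by a signal (-ERESTARTSYS)
 *   == 0 -> timed out
 *   > 0  -> completed; value is the jiffies left on the timeout
 * wait_for_completion_timeout() follows the same convention minus the
 * negative case, which is why both branches above can share one
 * "if (!ret_wait)" timeout test.
 */
static int my_wait_for_cmd(struct completion *done, unsigned int timeout_ms)
{
        long ret = wait_for_completion_interruptible_timeout(
                        done, msecs_to_jiffies(timeout_ms));

        if (ret < 0)
                return ret;             /* signal delivered */
        if (!ret)
                return -ETIMEDOUT;      /* timed out */
        return 0;                       /* completed in time */
}
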
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
246 246
247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) 247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
248{ 248{
249 BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
250 return ring->prod == ring->cons; 249 return ring->prod == ring->cons;
251} 250}
252 251
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
601 continue; 601 continue;
602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", 602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
603 __func__, i, port); 603 __func__, i, port);
604 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 604 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
606 eqe->event.port_change.port = 606 eqe->event.port_change.port =
607 cpu_to_be32( 607 cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
640 continue; 640 continue;
641 if (i == mlx4_master_func_num(dev)) 641 if (i == mlx4_master_func_num(dev))
642 continue; 642 continue;
643 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 643 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
645 eqe->event.port_change.port = 645 eqe->event.port_change.port =
646 cpu_to_be32( 646 cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2273 } else if (err == -ENOENT) { 2273 } else if (err == -ENOENT) {
2274 err = 0; 2274 err = 0;
2275 continue; 2275 continue;
2276 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2277 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2278 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2279 MLX4_SINK_COUNTER_INDEX(dev));
2280 err = 0;
2276 } else { 2281 } else {
2277 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2282 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2278 __func__, port + 1, err); 2283 __func__, port + 1, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
391 /* disable cmdif checksum */ 391 /* disable cmdif checksum */
392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
393 393
394 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
395
394 err = set_caps(dev, set_ctx, set_sz); 396 err = set_caps(dev, set_ctx, set_sz);
395 397
396query_ex: 398query_ex:
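
The one-line mlx5 addition programs the HCA's UAR page-size capability from the kernel's page size. Reading log_uar_page_sz as log2(PAGE_SIZE / 4 KiB) - which is an interpretation of the field name, not something stated in the hunk - a 4 KiB-page kernel (PAGE_SHIFT = 12) writes 0, while a 64 KiB-page arm64 or powerpc kernel (PAGE_SHIFT = 16) writes 4, i.e. 4 KiB << 4 = 64 KiB, so the doorbell pages the firmware hands out match the CPU page size instead of defaulting to 4 KiB.
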
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a00f15..09d2e16fd6b0 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
952 952
953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, 953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); 954 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
955 err = dma_mapping_error(adapter->dev, 955 if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
956 sg_dma_address(&tx_ctl->sg)); 956 err = -ENOMEM;
957 if (err) {
958 sg_dma_address(&tx_ctl->sg) = 0; 957 sg_dma_address(&tx_ctl->sg) = 0;
959 goto err; 958 goto err;
960 } 959 }
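
The ks8842 fix stops storing the result of dma_mapping_error() directly in err: the helper only reports whether the mapping failed, it is not an errno, so the caller now substitutes -ENOMEM. The general idiom, with my_map_buf() as a made-up name:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_map_buf(struct device *dev, void *buf, size_t len,
                      dma_addr_t *dma)
{
        *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma))
                return -ENOMEM;         /* report a real errno to callers */
        return 0;
}
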
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1415 if (fw->size & 0xF) { 1415 if (fw->size & 0xF) {
1416 addr = dest + size; 1416 addr = dest + size;
1417 for (i = 0; i < (fw->size & 0xF); i++) 1417 for (i = 0; i < (fw->size & 0xF); i++)
1418 data[i] = temp[size + i]; 1418 data[i] = ((u8 *)temp)[size + i];
1419 for (; i < 16; i++) 1419 for (; i < 16; i++)
1420 data[i] = 0; 1420 data[i] = 0;
1421 ret = qlcnic_ms_mem_write128(adapter, addr, 1421 ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4875 case RTL_GIGA_MAC_VER_46: 4875 case RTL_GIGA_MAC_VER_46:
4876 case RTL_GIGA_MAC_VER_47: 4876 case RTL_GIGA_MAC_VER_47:
4877 case RTL_GIGA_MAC_VER_48: 4877 case RTL_GIGA_MAC_VER_48:
4878 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4879 break;
4878 case RTL_GIGA_MAC_VER_49: 4880 case RTL_GIGA_MAC_VER_49:
4879 case RTL_GIGA_MAC_VER_50: 4881 case RTL_GIGA_MAC_VER_50:
4880 case RTL_GIGA_MAC_VER_51: 4882 case RTL_GIGA_MAC_VER_51:
4881 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); 4883 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4882 break; 4884 break;
4883 default: 4885 default:
4884 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); 4886 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
228 struct ravb_desc *desc = NULL; 228 struct ravb_desc *desc = NULL;
229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; 230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
231 struct sk_buff *skb;
232 dma_addr_t dma_addr; 231 dma_addr_t dma_addr;
233 void *buffer;
234 int i; 232 int i;
235 233
236 priv->cur_rx[q] = 0; 234 priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
241 memset(priv->rx_ring[q], 0, rx_ring_size); 239 memset(priv->rx_ring[q], 0, rx_ring_size);
242 /* Build RX ring buffer */ 240 /* Build RX ring buffer */
243 for (i = 0; i < priv->num_rx_ring[q]; i++) { 241 for (i = 0; i < priv->num_rx_ring[q]; i++) {
244 priv->rx_skb[q][i] = NULL;
245 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
246 if (!skb)
247 break;
248 ravb_set_buffer_align(skb);
249 /* RX descriptor */ 242 /* RX descriptor */
250 rx_desc = &priv->rx_ring[q][i]; 243 rx_desc = &priv->rx_ring[q][i];
251 /* The size of the buffer should be on 16-byte boundary. */ 244 /* The size of the buffer should be on 16-byte boundary. */
252 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); 245 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
253 dma_addr = dma_map_single(&ndev->dev, skb->data, 246 dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
254 ALIGN(PKT_BUF_SZ, 16), 247 ALIGN(PKT_BUF_SZ, 16),
255 DMA_FROM_DEVICE); 248 DMA_FROM_DEVICE);
256 if (dma_mapping_error(&ndev->dev, dma_addr)) { 249 /* We just set the data size to 0 for a failed mapping which
257 dev_kfree_skb(skb); 250 * should prevent DMA from happening...
258 break; 251 */
259 } 252 if (dma_mapping_error(&ndev->dev, dma_addr))
260 priv->rx_skb[q][i] = skb; 253 rx_desc->ds_cc = cpu_to_le16(0);
261 rx_desc->dptr = cpu_to_le32(dma_addr); 254 rx_desc->dptr = cpu_to_le32(dma_addr);
262 rx_desc->die_dt = DT_FEMPTY; 255 rx_desc->die_dt = DT_FEMPTY;
263 } 256 }
264 rx_desc = &priv->rx_ring[q][i]; 257 rx_desc = &priv->rx_ring[q][i];
265 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 258 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
266 rx_desc->die_dt = DT_LINKFIX; /* type */ 259 rx_desc->die_dt = DT_LINKFIX; /* type */
267 priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
268 260
269 memset(priv->tx_ring[q], 0, tx_ring_size); 261 memset(priv->tx_ring[q], 0, tx_ring_size);
270 /* Build TX ring buffer */ 262 /* Build TX ring buffer */
271 for (i = 0; i < priv->num_tx_ring[q]; i++) { 263 for (i = 0; i < priv->num_tx_ring[q]; i++) {
272 priv->tx_skb[q][i] = NULL;
273 priv->tx_buffers[q][i] = NULL;
274 buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
275 if (!buffer)
276 break;
277 /* Aligned TX buffer */
278 priv->tx_buffers[q][i] = buffer;
279 tx_desc = &priv->tx_ring[q][i]; 264 tx_desc = &priv->tx_ring[q][i];
280 tx_desc->die_dt = DT_EEMPTY; 265 tx_desc->die_dt = DT_EEMPTY;
281 } 266 }
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
298static int ravb_ring_init(struct net_device *ndev, int q) 283static int ravb_ring_init(struct net_device *ndev, int q)
299{ 284{
300 struct ravb_private *priv = netdev_priv(ndev); 285 struct ravb_private *priv = netdev_priv(ndev);
286 struct sk_buff *skb;
301 int ring_size; 287 int ring_size;
288 void *buffer;
289 int i;
302 290
303 /* Allocate RX and TX skb rings */ 291 /* Allocate RX and TX skb rings */
304 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], 292 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
308 if (!priv->rx_skb[q] || !priv->tx_skb[q]) 296 if (!priv->rx_skb[q] || !priv->tx_skb[q])
309 goto error; 297 goto error;
310 298
299 for (i = 0; i < priv->num_rx_ring[q]; i++) {
300 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
301 if (!skb)
302 goto error;
303 ravb_set_buffer_align(skb);
304 priv->rx_skb[q][i] = skb;
305 }
306
311 /* Allocate rings for the aligned buffers */ 307 /* Allocate rings for the aligned buffers */
312 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], 308 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
313 sizeof(*priv->tx_buffers[q]), GFP_KERNEL); 309 sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
314 if (!priv->tx_buffers[q]) 310 if (!priv->tx_buffers[q])
315 goto error; 311 goto error;
316 312
313 for (i = 0; i < priv->num_tx_ring[q]; i++) {
314 buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
315 if (!buffer)
316 goto error;
317 /* Aligned TX buffer */
318 priv->tx_buffers[q][i] = buffer;
319 }
320
317 /* Allocate all RX descriptors. */ 321 /* Allocate all RX descriptors. */
318 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); 322 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
319 priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, 323 priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
524 if (--boguscnt < 0) 528 if (--boguscnt < 0)
525 break; 529 break;
526 530
531 /* We use 0-byte descriptors to mark the DMA mapping errors */
532 if (!pkt_len)
533 continue;
534
527 if (desc_status & MSC_MC) 535 if (desc_status & MSC_MC)
528 stats->multicast++; 536 stats->multicast++;
529 537
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
543 551
544 skb = priv->rx_skb[q][entry]; 552 skb = priv->rx_skb[q][entry];
545 priv->rx_skb[q][entry] = NULL; 553 priv->rx_skb[q][entry] = NULL;
546 dma_sync_single_for_cpu(&ndev->dev, 554 dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
547 le32_to_cpu(desc->dptr), 555 ALIGN(PKT_BUF_SZ, 16),
548 ALIGN(PKT_BUF_SZ, 16), 556 DMA_FROM_DEVICE);
549 DMA_FROM_DEVICE);
550 get_ts &= (q == RAVB_NC) ? 557 get_ts &= (q == RAVB_NC) ?
551 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : 558 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
552 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; 559 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
584 if (!skb) 591 if (!skb)
585 break; /* Better luck next round. */ 592 break; /* Better luck next round. */
586 ravb_set_buffer_align(skb); 593 ravb_set_buffer_align(skb);
587 dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
588 ALIGN(PKT_BUF_SZ, 16),
589 DMA_FROM_DEVICE);
590 dma_addr = dma_map_single(&ndev->dev, skb->data, 594 dma_addr = dma_map_single(&ndev->dev, skb->data,
591 le16_to_cpu(desc->ds_cc), 595 le16_to_cpu(desc->ds_cc),
592 DMA_FROM_DEVICE); 596 DMA_FROM_DEVICE);
593 skb_checksum_none_assert(skb); 597 skb_checksum_none_assert(skb);
594 if (dma_mapping_error(&ndev->dev, dma_addr)) { 598 /* We just set the data size to 0 for a failed mapping
595 dev_kfree_skb_any(skb); 599 * which should prevent DMA from happening...
596 break; 600 */
597 } 601 if (dma_mapping_error(&ndev->dev, dma_addr))
602 desc->ds_cc = cpu_to_le16(0);
598 desc->dptr = cpu_to_le32(dma_addr); 603 desc->dptr = cpu_to_le32(dma_addr);
599 priv->rx_skb[q][entry] = skb; 604 priv->rx_skb[q][entry] = skb;
600 } 605 }
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1279 u32 dma_addr; 1284 u32 dma_addr;
1280 void *buffer; 1285 void *buffer;
1281 u32 entry; 1286 u32 entry;
1282 u32 tccr;
1283 1287
1284 spin_lock_irqsave(&priv->lock, flags); 1288 spin_lock_irqsave(&priv->lock, flags);
1285 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { 1289 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1328 dma_wmb(); 1332 dma_wmb();
1329 desc->die_dt = DT_FSINGLE; 1333 desc->die_dt = DT_FSINGLE;
1330 1334
1331 tccr = ravb_read(ndev, TCCR); 1335 ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
1332 if (!(tccr & (TCCR_TSRQ0 << q)))
1333 ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
1334 1336
1335 priv->cur_tx[q]++; 1337 priv->cur_tx[q]++;
1336 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && 1338 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
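
Two related things happen in the ravb hunks: RX skb and TX buffer allocation moves from ravb_ring_format(), which has no way to fail, into ravb_ring_init(), which does, and a failed dma_map_single() during ring formatting or RX refill is no longer fatal - the descriptor's data size is forced to 0 so the hardware never DMAs into it, and ravb_rx() drops zero-length entries. A sketch of the zero-length-marker idea, with struct my_rx_desc standing in for the driver's real descriptor layout:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_rx_desc {
        __le16 ds_cc;           /* descriptor data size */
        __le32 dptr;            /* buffer address */
};

static void my_refill_one(struct net_device *ndev, struct my_rx_desc *desc,
                          struct sk_buff *skb, size_t buf_sz)
{
        dma_addr_t dma = dma_map_single(&ndev->dev, skb->data, buf_sz,
                                        DMA_FROM_DEVICE);

        desc->ds_cc = cpu_to_le16(buf_sz);
        if (dma_mapping_error(&ndev->dev, dma))
                desc->ds_cc = cpu_to_le16(0);   /* poll loop skips len 0 */
        desc->dptr = cpu_to_le32(dma);
}

Marking instead of failing keeps the ring fully formatted even when a mapping fails under memory pressure; only the affected slot is lost until the next refill.
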
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4822 ROCKER_OP_FLAG_REMOVE); 4822 ROCKER_OP_FLAG_REMOVE);
4823 unregister_netdev(rocker_port->dev); 4823 unregister_netdev(rocker_port->dev);
4824 free_netdev(rocker_port->dev);
4824 } 4825 }
4825 kfree(rocker->ports); 4826 kfree(rocker->ports);
4826} 4827}
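
The rocker one-liner pairs the existing unregister_netdev() with free_netdev(): unregistering only detaches the port from the networking stack, so without the second call every port net_device allocated at probe time was leaked on removal. The canonical teardown order:

unregister_netdev(dev);         /* detach from the stack */
free_netdev(dev);               /* then release the net_device itself */
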
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 847643455468..605cc8948594 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -101,6 +101,11 @@ static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
101 return resource_size(&efx->pci_dev->resource[bar]); 101 return resource_size(&efx->pci_dev->resource[bar]);
102} 102}
103 103
104static bool efx_ef10_is_vf(struct efx_nic *efx)
105{
106 return efx->type->is_vf;
107}
108
104static int efx_ef10_get_pf_index(struct efx_nic *efx) 109static int efx_ef10_get_pf_index(struct efx_nic *efx)
105{ 110{
106 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 111 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
@@ -677,6 +682,48 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
677 return efx_ef10_probe(efx); 682 return efx_ef10_probe(efx);
678} 683}
679 684
685int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
686{
687 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
688
689 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
690 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
691 NULL, 0, NULL);
692}
693
694int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
695{
696 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
697
698 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
699 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
700 NULL, 0, NULL);
701}
702
703int efx_ef10_vport_add_mac(struct efx_nic *efx,
704 unsigned int port_id, u8 *mac)
705{
706 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
707
708 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
709 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
710
711 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
712 sizeof(inbuf), NULL, 0, NULL);
713}
714
715int efx_ef10_vport_del_mac(struct efx_nic *efx,
716 unsigned int port_id, u8 *mac)
717{
718 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
719
720 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
721 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
722
723 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
724 sizeof(inbuf), NULL, 0, NULL);
725}
726
680#ifdef CONFIG_SFC_SRIOV 727#ifdef CONFIG_SFC_SRIOV
681static int efx_ef10_probe_vf(struct efx_nic *efx) 728static int efx_ef10_probe_vf(struct efx_nic *efx)
682{ 729{
@@ -3804,6 +3851,72 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3804 WARN_ON(remove_failed); 3851 WARN_ON(remove_failed);
3805} 3852}
3806 3853
3854static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
3855{
3856 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3857 u8 mac_old[ETH_ALEN];
3858 int rc, rc2;
3859
3860 /* Only reconfigure a PF-created vport */
3861 if (is_zero_ether_addr(nic_data->vport_mac))
3862 return 0;
3863
3864 efx_device_detach_sync(efx);
3865 efx_net_stop(efx->net_dev);
3866 down_write(&efx->filter_sem);
3867 efx_ef10_filter_table_remove(efx);
3868 up_write(&efx->filter_sem);
3869
3870 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
3871 if (rc)
3872 goto restore_filters;
3873
3874 ether_addr_copy(mac_old, nic_data->vport_mac);
3875 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
3876 nic_data->vport_mac);
3877 if (rc)
3878 goto restore_vadaptor;
3879
3880 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
3881 efx->net_dev->dev_addr);
3882 if (!rc) {
3883 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
3884 } else {
3885 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
3886 if (rc2) {
3887 /* Failed to add original MAC, so clear vport_mac */
3888 eth_zero_addr(nic_data->vport_mac);
3889 goto reset_nic;
3890 }
3891 }
3892
3893restore_vadaptor:
3894 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
3895 if (rc2)
3896 goto reset_nic;
3897restore_filters:
3898 down_write(&efx->filter_sem);
3899 rc2 = efx_ef10_filter_table_probe(efx);
3900 up_write(&efx->filter_sem);
3901 if (rc2)
3902 goto reset_nic;
3903
3904 rc2 = efx_net_open(efx->net_dev);
3905 if (rc2)
3906 goto reset_nic;
3907
3908 netif_device_attach(efx->net_dev);
3909
3910 return rc;
3911
3912reset_nic:
3913 netif_err(efx, drv, efx->net_dev,
3914 "Failed to restore when changing MAC address - scheduling reset\n");
3915 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
3916
3917 return rc ? rc : rc2;
3918}
3919
3807static int efx_ef10_set_mac_address(struct efx_nic *efx) 3920static int efx_ef10_set_mac_address(struct efx_nic *efx)
3808{ 3921{
3809 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 3922 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -3820,8 +3933,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
3820 efx->net_dev->dev_addr); 3933 efx->net_dev->dev_addr);
3821 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 3934 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
3822 nic_data->vport_id); 3935 nic_data->vport_id);
3823 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 3936 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
3824 sizeof(inbuf), NULL, 0, NULL); 3937 sizeof(inbuf), NULL, 0, NULL);
3825 3938
3826 efx_ef10_filter_table_probe(efx); 3939 efx_ef10_filter_table_probe(efx);
3827 up_write(&efx->filter_sem); 3940 up_write(&efx->filter_sem);
@@ -3829,38 +3942,27 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
3829 efx_net_open(efx->net_dev); 3942 efx_net_open(efx->net_dev);
3830 netif_device_attach(efx->net_dev); 3943 netif_device_attach(efx->net_dev);
3831 3944
3832#if !defined(CONFIG_SFC_SRIOV) 3945#ifdef CONFIG_SFC_SRIOV
3833 if (rc == -EPERM) 3946 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
3834 netif_err(efx, drv, efx->net_dev,
3835 "Cannot change MAC address; use sfboot to enable mac-spoofing"
3836 " on this interface\n");
3837#else
3838 if (rc == -EPERM) {
3839 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 3947 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
3840 3948
3841 /* Switch to PF and change MAC address on vport */ 3949 if (rc == -EPERM) {
3842 if (efx->pci_dev->is_virtfn && pci_dev_pf) { 3950 struct efx_nic *efx_pf;
3843 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
3844 3951
3845 if (!efx_ef10_sriov_set_vf_mac(efx_pf, 3952 /* Switch to PF and change MAC address on vport */
3846 nic_data->vf_index, 3953 efx_pf = pci_get_drvdata(pci_dev_pf);
3847 efx->net_dev->dev_addr))
3848 return 0;
3849 }
3850 netif_err(efx, drv, efx->net_dev,
3851 "Cannot change MAC address; use sfboot to enable mac-spoofing"
3852 " on this interface\n");
3853 } else if (efx->pci_dev->is_virtfn) {
3854 /* Successfully changed by VF (with MAC spoofing), so update the
3855 * parent PF if possible.
3856 */
3857 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
3858 3954
3859 if (pci_dev_pf) { 3955 rc = efx_ef10_sriov_set_vf_mac(efx_pf,
3956 nic_data->vf_index,
3957 efx->net_dev->dev_addr);
3958 } else if (!rc) {
3860 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 3959 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
3861 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 3960 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
3862 unsigned int i; 3961 unsigned int i;
3863 3962
3963 /* MAC address successfully changed by VF (with MAC
3964 * spoofing) so update the parent PF if possible.
3965 */
3864 for (i = 0; i < efx_pf->vf_count; ++i) { 3966 for (i = 0; i < efx_pf->vf_count; ++i) {
3865 struct ef10_vf *vf = nic_data->vf + i; 3967 struct ef10_vf *vf = nic_data->vf + i;
3866 3968
@@ -3871,8 +3973,24 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
3871 } 3973 }
3872 } 3974 }
3873 } 3975 }
3874 } 3976 } else
3875#endif 3977#endif
3978 if (rc == -EPERM) {
3979 netif_err(efx, drv, efx->net_dev,
3980 "Cannot change MAC address; use sfboot to enable"
3981 " mac-spoofing on this interface\n");
3982 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
3983 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
3984 * fall-back to the method of changing the MAC address on the
3985 * vport. This only applies to PFs because such versions of
3986 * MCFW do not support VFs.
3987 */
3988 rc = efx_ef10_vport_set_mac_address(efx);
3989 } else {
3990 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
3991 sizeof(inbuf), NULL, 0, rc);
3992 }
3993
3876 return rc; 3994 return rc;
3877} 3995}
3878 3996
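
The reworked efx_ef10_set_mac_address() error handling reads as a ladder: MC_CMD_VADAPTOR_SET_MAC is now issued with efx_mcdi_rpc_quiet() so expected failures are not logged by the MCDI core, and the caller decides what each one means - on a VF, -EPERM means MAC spoofing is disabled and the change is retried through the parent PF via efx_ef10_sriov_set_vf_mac(); on a PF, -ENOSYS means the running firmware predates VADAPTOR_SET_MAC and the new efx_ef10_vport_set_mac_address() fallback swaps the MAC on the vport itself (free the vadaptor, delete/add the vport MAC, then restore vadaptor, filter table and open state, scheduling a datapath reset if the restore fails); any other error is reported with efx_mcdi_display_error(). The quiet-RPC calling pattern in isolation, as a hypothetical helper built on the sfc mcdi.h declarations:

static int my_issue_cmd(struct efx_nic *efx, unsigned int cmd,
                        const efx_dword_t *inbuf, size_t inlen)
{
        int rc = efx_mcdi_rpc_quiet(efx, cmd, inbuf, inlen, NULL, 0, NULL);

        if (rc == -EPERM || rc == -ENOSYS)
                return rc;      /* expected: caller takes a fallback path */
        if (rc)                 /* unexpected: log what _rpc() would have */
                efx_mcdi_display_error(efx, cmd, inlen, NULL, 0, rc);
        return rc;
}
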
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 6c9b6e45509a..3c17f274e802 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -29,30 +29,6 @@ static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
29 NULL, 0, NULL); 29 NULL, 0, NULL);
30} 30}
31 31
32static int efx_ef10_vport_add_mac(struct efx_nic *efx,
33 unsigned int port_id, u8 *mac)
34{
35 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
36
37 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
38 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
39
40 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
41 sizeof(inbuf), NULL, 0, NULL);
42}
43
44static int efx_ef10_vport_del_mac(struct efx_nic *efx,
45 unsigned int port_id, u8 *mac)
46{
47 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
48
49 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
50 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
51
52 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
53 sizeof(inbuf), NULL, 0, NULL);
54}
55
56static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id, 32static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
57 unsigned int vswitch_type) 33 unsigned int vswitch_type)
58{ 34{
@@ -136,24 +112,6 @@ static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
136 NULL, 0, NULL); 112 NULL, 0, NULL);
137} 113}
138 114
139static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
140{
141 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
142
143 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
144 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
145 NULL, 0, NULL);
146}
147
148static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
149{
150 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
151
152 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
153 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
154 NULL, 0, NULL);
155}
156
157static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx) 115static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
158{ 116{
159 struct efx_ef10_nic_data *nic_data = efx->nic_data; 117 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -640,21 +598,21 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
640 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL, 598 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
641 vf->vlan, &vf->vport_id); 599 vf->vlan, &vf->vport_id);
642 if (rc) 600 if (rc)
643 goto reset_nic; 601 goto reset_nic_up_write;
644 602
645restore_mac: 603restore_mac:
646 if (!is_zero_ether_addr(vf->mac)) { 604 if (!is_zero_ether_addr(vf->mac)) {
647 rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac); 605 rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
648 if (rc2) { 606 if (rc2) {
649 eth_zero_addr(vf->mac); 607 eth_zero_addr(vf->mac);
650 goto reset_nic; 608 goto reset_nic_up_write;
651 } 609 }
652 } 610 }
653 611
654restore_evb_port: 612restore_evb_port:
655 rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i); 613 rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
656 if (rc2) 614 if (rc2)
657 goto reset_nic; 615 goto reset_nic_up_write;
658 else 616 else
659 vf->vport_assigned = 1; 617 vf->vport_assigned = 1;
660 618
@@ -662,14 +620,16 @@ restore_vadaptor:
662 if (vf->efx) { 620 if (vf->efx) {
663 rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED); 621 rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
664 if (rc2) 622 if (rc2)
665 goto reset_nic; 623 goto reset_nic_up_write;
666 } 624 }
667 625
668restore_filters: 626restore_filters:
669 if (vf->efx) { 627 if (vf->efx) {
670 rc2 = vf->efx->type->filter_table_probe(vf->efx); 628 rc2 = vf->efx->type->filter_table_probe(vf->efx);
671 if (rc2) 629 if (rc2)
672 goto reset_nic; 630 goto reset_nic_up_write;
631
632 up_write(&vf->efx->filter_sem);
673 633
674 up_write(&vf->efx->filter_sem); 634 up_write(&vf->efx->filter_sem);
675 635
@@ -681,9 +641,12 @@ restore_filters:
681 } 641 }
682 return rc; 642 return rc;
683 643
644reset_nic_up_write:
645 if (vf->efx)
646 up_write(&vf->efx->filter_sem);
647
684reset_nic: 648reset_nic:
685 if (vf->efx) { 649 if (vf->efx) {
686 up_write(&vf->efx->filter_sem);
687 netif_err(efx, drv, efx->net_dev, 650 netif_err(efx, drv, efx->net_dev,
688 "Failed to restore VF - scheduling reset.\n"); 651 "Failed to restore VF - scheduling reset.\n");
689 efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH); 652 efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index db4ef537c610..6d25b92cb45e 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -65,5 +65,11 @@ int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
65int efx_ef10_vswitching_restore_vf(struct efx_nic *efx); 65int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
66void efx_ef10_vswitching_remove_pf(struct efx_nic *efx); 66void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
67void efx_ef10_vswitching_remove_vf(struct efx_nic *efx); 67void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
68int efx_ef10_vport_add_mac(struct efx_nic *efx,
69 unsigned int port_id, u8 *mac);
70int efx_ef10_vport_del_mac(struct efx_nic *efx,
71 unsigned int port_id, u8 *mac);
72int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
73int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
68 74
69#endif /* EF10_SRIOV_H */ 75#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 804b9ad553d3..03bc03b67f08 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -245,11 +245,17 @@ static int efx_check_disabled(struct efx_nic *efx)
245 */ 245 */
246static int efx_process_channel(struct efx_channel *channel, int budget) 246static int efx_process_channel(struct efx_channel *channel, int budget)
247{ 247{
248 struct efx_tx_queue *tx_queue;
248 int spent; 249 int spent;
249 250
250 if (unlikely(!channel->enabled)) 251 if (unlikely(!channel->enabled))
251 return 0; 252 return 0;
252 253
254 efx_for_each_channel_tx_queue(tx_queue, channel) {
255 tx_queue->pkts_compl = 0;
256 tx_queue->bytes_compl = 0;
257 }
258
253 spent = efx_nic_process_eventq(channel, budget); 259 spent = efx_nic_process_eventq(channel, budget);
254 if (spent && efx_channel_has_rx_queue(channel)) { 260 if (spent && efx_channel_has_rx_queue(channel)) {
255 struct efx_rx_queue *rx_queue = 261 struct efx_rx_queue *rx_queue =
@@ -259,6 +265,14 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
259 efx_fast_push_rx_descriptors(rx_queue, true); 265 efx_fast_push_rx_descriptors(rx_queue, true);
260 } 266 }
261 267
268 /* Update BQL */
269 efx_for_each_channel_tx_queue(tx_queue, channel) {
270 if (tx_queue->bytes_compl) {
271 netdev_tx_completed_queue(tx_queue->core_txq,
272 tx_queue->pkts_compl, tx_queue->bytes_compl);
273 }
274 }
275
262 return spent; 276 return spent;
263} 277}
264 278
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index d72f522bf9c3..47d1e3a96522 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -241,6 +241,8 @@ struct efx_tx_queue {
241 unsigned int read_count ____cacheline_aligned_in_smp; 241 unsigned int read_count ____cacheline_aligned_in_smp;
242 unsigned int old_write_count; 242 unsigned int old_write_count;
243 unsigned int merge_events; 243 unsigned int merge_events;
244 unsigned int bytes_compl;
245 unsigned int pkts_compl;
244 246
245 /* Members used only on the xmit path */ 247 /* Members used only on the xmit path */
246 unsigned int insert_count ____cacheline_aligned_in_smp; 248 unsigned int insert_count ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index aaf2987512b5..1833a0146571 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -617,7 +617,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
617 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); 617 EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
618 618
619 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); 619 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
620 netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); 620 tx_queue->pkts_compl += pkts_compl;
621 tx_queue->bytes_compl += bytes_compl;
621 622
622 if (pkts_compl > 1) 623 if (pkts_compl > 1)
623 ++tx_queue->merge_events; 624 ++tx_queue->merge_events;
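
Taken together, the efx.c, net_driver.h and tx.c hunks batch the BQL accounting: efx_xmit_done() now only accumulates pkts_compl/bytes_compl on the TX queue, while efx_process_channel() zeroes the counters before the event loop and makes a single netdev_tx_completed_queue() call per queue after it, instead of one call per completion event. The shape of the pattern, with a cut-down my_txq standing in for struct efx_tx_queue:

#include <linux/netdevice.h>

struct my_txq {
        struct netdev_queue *core_txq;
        unsigned int pkts_compl;
        unsigned int bytes_compl;
};

/* before the event loop: reset the per-poll counters */
static void my_poll_start(struct my_txq *txq)
{
        txq->pkts_compl = 0;
        txq->bytes_compl = 0;
}

/* per completion event: just accumulate */
static void my_tx_done(struct my_txq *txq, unsigned int pkts,
                       unsigned int bytes)
{
        txq->pkts_compl += pkts;
        txq->bytes_compl += bytes;
}

/* after the event loop: one BQL update per queue */
static void my_poll_end(struct my_txq *txq)
{
        if (txq->bytes_compl)
                netdev_tx_completed_queue(txq->core_txq, txq->pkts_compl,
                                          txq->bytes_compl);
}
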
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
42#define NSS_COMMON_CLK_DIV_MASK 0x7f 42#define NSS_COMMON_CLK_DIV_MASK 0x7f
43 43
44#define NSS_COMMON_CLK_SRC_CTRL 0x14 44#define NSS_COMMON_CLK_SRC_CTRL 0x14
45#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) 45#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
46/* Mode is coded on 1 bit but is different depending on the MAC ID: 46/* Mode is coded on 1 bit but is different depending on the MAC ID:
47 * MAC0: QSGMII=0 RGMII=1 47 * MAC0: QSGMII=0 RGMII=1
48 * MAC1: QSGMII=0 SGMII=0 RGMII=1 48 * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
291 291
292 /* Configure the clock src according to the mode */ 292 /* Configure the clock src according to the mode */
293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); 293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
294 val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); 294 val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
295 switch (gmac->phy_mode) { 295 switch (gmac->phy_mode) {
296 case PHY_INTERFACE_MODE_RGMII: 296 case PHY_INTERFACE_MODE_RGMII:
297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << 297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
2843 if (res->mac) 2843 if (res->mac)
2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
2845 2845
2846 dev_set_drvdata(device, priv); 2846 dev_set_drvdata(device, priv->dev);
2847 2847
2848 /* Verify driver arguments */ 2848 /* Verify driver arguments */
2849 stmmac_verify_args(); 2849 stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, 413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
414 stmmac_pltfr_resume); 414 stmmac_pltfr_resume);
415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); 415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
416
417MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
418MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
419MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6658 struct sk_buff *skb_new; 6658 struct sk_buff *skb_new;
6659 6659
6660 skb_new = skb_realloc_headroom(skb, len); 6660 skb_new = skb_realloc_headroom(skb, len);
6661 if (!skb_new) { 6661 if (!skb_new)
6662 rp->tx_errors++;
6663 goto out_drop; 6662 goto out_drop;
6664 }
6665 kfree_skb(skb); 6663 kfree_skb(skb);
6666 skb = skb_new; 6664 skb = skb_new;
6667 } else 6665 } else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 462820514fae..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -138,19 +138,6 @@ do { \
138#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT) 138#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
139#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1) 139#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
140 140
141#define cpsw_enable_irq(priv) \
142 do { \
143 u32 i; \
144 for (i = 0; i < priv->num_irqs; i++) \
145 enable_irq(priv->irqs_table[i]); \
146 } while (0)
147#define cpsw_disable_irq(priv) \
148 do { \
149 u32 i; \
150 for (i = 0; i < priv->num_irqs; i++) \
151 disable_irq_nosync(priv->irqs_table[i]); \
152 } while (0)
153
154#define cpsw_slave_index(priv) \ 141#define cpsw_slave_index(priv) \
155 ((priv->data.dual_emac) ? priv->emac_port : \ 142 ((priv->data.dual_emac) ? priv->emac_port : \
156 priv->data.active_slave) 143 priv->data.active_slave)
@@ -509,9 +496,11 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
509 (func)(slave++, ##arg); \ 496 (func)(slave++, ##arg); \
510 } while (0) 497 } while (0)
511#define cpsw_get_slave_ndev(priv, __slave_no__) \ 498#define cpsw_get_slave_ndev(priv, __slave_no__) \
512 (priv->slaves[__slave_no__].ndev) 499 ((__slave_no__ < priv->data.slaves) ? \
500 priv->slaves[__slave_no__].ndev : NULL)
513#define cpsw_get_slave_priv(priv, __slave_no__) \ 501#define cpsw_get_slave_priv(priv, __slave_no__) \
514 ((priv->slaves[__slave_no__].ndev) ? \ 502 (((__slave_no__ < priv->data.slaves) && \
503 (priv->slaves[__slave_no__].ndev)) ? \
515 netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \ 504 netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
516 505
517#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \ 506#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
@@ -781,7 +770,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
781 770
782 cpsw_intr_disable(priv); 771 cpsw_intr_disable(priv);
783 if (priv->irq_enabled == true) { 772 if (priv->irq_enabled == true) {
784 cpsw_disable_irq(priv); 773 disable_irq_nosync(priv->irqs_table[0]);
785 priv->irq_enabled = false; 774 priv->irq_enabled = false;
786 } 775 }
787 776
@@ -804,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
804static int cpsw_poll(struct napi_struct *napi, int budget) 793static int cpsw_poll(struct napi_struct *napi, int budget)
805{ 794{
806 struct cpsw_priv *priv = napi_to_priv(napi); 795 struct cpsw_priv *priv = napi_to_priv(napi);
807 int num_tx, num_rx; 796 int num_rx;
808
809 num_tx = cpdma_chan_process(priv->txch, 128);
810 797
811 num_rx = cpdma_chan_process(priv->rxch, budget); 798 num_rx = cpdma_chan_process(priv->rxch, budget);
812 if (num_rx < budget) { 799 if (num_rx < budget) {
@@ -817,13 +804,12 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
817 prim_cpsw = cpsw_get_slave_priv(priv, 0); 804 prim_cpsw = cpsw_get_slave_priv(priv, 0);
818 if (prim_cpsw->irq_enabled == false) { 805 if (prim_cpsw->irq_enabled == false) {
819 prim_cpsw->irq_enabled = true; 806 prim_cpsw->irq_enabled = true;
820 cpsw_enable_irq(priv); 807 enable_irq(priv->irqs_table[0]);
821 } 808 }
822 } 809 }
823 810
824 if (num_rx || num_tx) 811 if (num_rx)
825 cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", 812 cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
826 num_rx, num_tx);
827 813
828 return num_rx; 814 return num_rx;
829} 815}
@@ -1333,7 +1319,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
1333 if (prim_cpsw->irq_enabled == false) { 1319 if (prim_cpsw->irq_enabled == false) {
1334 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { 1320 if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
1335 prim_cpsw->irq_enabled = true; 1321 prim_cpsw->irq_enabled = true;
1336 cpsw_enable_irq(prim_cpsw); 1322 enable_irq(prim_cpsw->irqs_table[0]);
1337 } 1323 }
1338 } 1324 }
1339 1325
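
The cpsw changes retire the cpsw_enable_irq()/cpsw_disable_irq() loops in favour of toggling only irqs_table[0], stop draining the TX channel from cpsw_poll(), and bounds-check the slave index in cpsw_get_slave_ndev()/cpsw_get_slave_priv() so an out-of-range index yields NULL instead of reading past the slaves array. The disable-in-hardirq / re-enable-when-under-budget handling is the usual NAPI arrangement; a generic sketch with hypothetical my_priv/my_process_rx names (the real driver also masks the controller via cpsw_intr_disable/enable):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
        struct napi_struct napi;
        int irq;
};

static int my_process_rx(struct my_priv *priv, int budget)
{
        return 0;               /* placeholder for descriptor processing */
}

static irqreturn_t my_rx_irq(int irq, void *dev_id)
{
        struct my_priv *priv = dev_id;

        disable_irq_nosync(irq);        /* keep the line quiet while polling */
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int done = my_process_rx(priv, budget);

        if (done < budget) {
                napi_complete(napi);
                enable_irq(priv->irq);  /* re-arm only when caught up */
        }
        return done;
}
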
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
85 struct list_head rxhook_list_head; 85 struct list_head rxhook_list_head;
86 unsigned int rx_queue_id; 86 unsigned int rx_queue_id;
87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; 87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
88 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
89 struct napi_struct rx_napi; 88 struct napi_struct rx_napi;
90 struct napi_struct tx_napi; 89 struct napi_struct tx_napi;
91 90
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
223 222
224/* SGMII functions */ 223/* SGMII functions */
225int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); 224int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
225bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); 226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); 227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
228 228
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) 34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64 35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ) 36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN 38#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16 39#define NETCP_MAX_MCAST_ADDR 16
39 40
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
804 if (likely(fdq == 0)) { 805 if (likely(fdq == 0)) {
805 unsigned int primary_buf_len; 806 unsigned int primary_buf_len;
806 /* Allocate a primary receive queue entry */ 807 /* Allocate a primary receive queue entry */
807 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; 808 buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
808 primary_buf_len = SKB_DATA_ALIGN(buf_len) + 809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
809 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
810 811
811 if (primary_buf_len <= PAGE_SIZE) { 812 bufptr = netdev_alloc_frag(primary_buf_len);
812 bufptr = netdev_alloc_frag(primary_buf_len); 813 pad[1] = primary_buf_len;
813 pad[1] = primary_buf_len;
814 } else {
815 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
816 GFP_DMA32 | __GFP_COLD);
817 pad[1] = 0;
818 }
819 814
820 if (unlikely(!bufptr)) { 815 if (unlikely(!bufptr)) {
821 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); 816 dev_warn_ratelimited(netcp->ndev_dev,
817 "Primary RX buffer alloc failed\n");
822 goto fail; 818 goto fail;
823 } 819 }
824 dma = dma_map_single(netcp->dev, bufptr, buf_len, 820 dma = dma_map_single(netcp->dev, bufptr, buf_len,
825 DMA_TO_DEVICE); 821 DMA_TO_DEVICE);
822 if (unlikely(dma_mapping_error(netcp->dev, dma)))
823 goto fail;
824
826 pad[0] = (u32)bufptr; 825 pad[0] = (u32)bufptr;
827 826
828 } else { 827 } else {
829 /* Allocate a secondary receive queue entry */ 828 /* Allocate a secondary receive queue entry */
830 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); 829 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
831 if (unlikely(!page)) { 830 if (unlikely(!page)) {
832 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); 831 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
833 goto fail; 832 goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
1010 1009
1011 /* Map the linear buffer */ 1010 /* Map the linear buffer */
1012 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); 1011 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
1013 if (unlikely(!dma_addr)) { 1012 if (unlikely(dma_mapping_error(dev, dma_addr))) {
1014 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); 1013 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1015 return NULL; 1014 return NULL;
1016 } 1015 }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1546 knav_queue_disable_notify(netcp->rx_queue); 1545 knav_queue_disable_notify(netcp->rx_queue);
1547 1546
1548 /* open Rx FDQs */ 1547 /* open Rx FDQs */
1549 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && 1548 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
1550 netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { 1549 ++i) {
1551 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); 1550 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1552 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); 1551 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1553 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { 1552 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1617,11 +1616,11 @@ static int netcp_ndo_open(struct net_device *ndev)
1617 } 1616 }
1618 mutex_unlock(&netcp_modules_lock); 1617 mutex_unlock(&netcp_modules_lock);
1619 1618
1620 netcp_rxpool_refill(netcp);
1621 napi_enable(&netcp->rx_napi); 1619 napi_enable(&netcp->rx_napi);
1622 napi_enable(&netcp->tx_napi); 1620 napi_enable(&netcp->tx_napi);
1623 knav_queue_enable_notify(netcp->tx_compl_q); 1621 knav_queue_enable_notify(netcp->tx_compl_q);
1624 knav_queue_enable_notify(netcp->rx_queue); 1622 knav_queue_enable_notify(netcp->rx_queue);
1623 netcp_rxpool_refill(netcp);
1625 netif_tx_wake_all_queues(ndev); 1624 netif_tx_wake_all_queues(ndev);
1626 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1625 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1627 return 0; 1626 return 0;
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
1941 netcp->rx_queue_depths[0] = 128; 1940 netcp->rx_queue_depths[0] = 128;
1942 } 1941 }
1943 1942
1944 ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
1945 netcp->rx_buffer_sizes,
1946 KNAV_DMA_FDQ_PER_CHAN);
1947 if (ret) {
1948 dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
1949 netcp->rx_buffer_sizes[0] = 1536;
1950 }
1951
1952 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); 1943 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1953 if (ret < 0) { 1944 if (ret < 0) {
1954 dev_err(dev, "missing \"rx-pool\" parameter\n"); 1945 dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2112,6 +2103,7 @@ probe_quit:
2112static int netcp_remove(struct platform_device *pdev) 2103static int netcp_remove(struct platform_device *pdev)
2113{ 2104{
2114 struct netcp_device *netcp_device = platform_get_drvdata(pdev); 2105 struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2106 struct netcp_intf *netcp_intf, *netcp_tmp;
2115 struct netcp_inst_modpriv *inst_modpriv, *tmp; 2107 struct netcp_inst_modpriv *inst_modpriv, *tmp;
2116 struct netcp_module *module; 2108 struct netcp_module *module;
2117 2109
@@ -2123,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev)
2123 list_del(&inst_modpriv->inst_list); 2115 list_del(&inst_modpriv->inst_list);
2124 kfree(inst_modpriv); 2116 kfree(inst_modpriv);
2125 } 2117 }
2126 WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
2127 pdev->name);
2128 2118
2129 devm_kfree(&pdev->dev, netcp_device); 2119 /* now that all modules are removed, clean up the interfaces */
2120 list_for_each_entry_safe(netcp_intf, netcp_tmp,
2121 &netcp_device->interface_head,
2122 interface_list) {
2123 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2124 }
2125
2126 WARN(!list_empty(&netcp_device->interface_head),
2127 "%s interface list not empty!\n", pdev->name);
2128
2130 pm_runtime_put_sync(&pdev->dev); 2129 pm_runtime_put_sync(&pdev->dev);
2131 pm_runtime_disable(&pdev->dev); 2130 pm_runtime_disable(&pdev->dev);
2132 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
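
Several netcp_core changes follow from one decision: the primary RX buffer size is no longer read from a per-interface "rx-buffer-size" DT property but fixed at NETCP_PACKET_SIZE = ETH_FRAME_LEN + ETH_FCS_LEN = 1514 + 4 = 1518 bytes. With that bound the fragment always fits a page, so the PAGE_SIZE check and its kmalloc fallback can go and netdev_alloc_frag() is used unconditionally; the mapping result is also now checked with dma_mapping_error() instead of testing the handle against zero, since a DMA address of 0 can be valid and the error cookie is platform-specific. The size arithmetic, spelled out (the NET_IP_ALIGN = 2 and NET_SKB_PAD = 64 figures are typical defaults, both arch/config dependent):

buf_len         = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET
                = 1518 + (2 + 64)                             = 1584
primary_buf_len = SKB_DATA_ALIGN(buf_len)
                  + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
               ~= 1600 + ~320                                ~= 1920 < 4096

The remove path also changes: netcp_remove() now tears down any remaining interfaces itself once all modules are gone (keeping the WARN as a should-never-happen check), and the explicit devm_kfree() of the device structure is dropped since devres frees it on detach anyway.
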
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902} 1902}
1903 1903
1904static void gbe_sgmii_rtreset(struct gbe_priv *priv,
1905 struct gbe_slave *slave, bool set)
1906{
1907 void __iomem *sgmii_port_regs;
1908
1909 if (SLAVE_LINK_IS_XGMII(slave))
1910 return;
1911
1912 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1913 sgmii_port_regs = priv->sgmii_port34_regs;
1914 else
1915 sgmii_port_regs = priv->sgmii_port_regs;
1916
1917 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
1918}
1919
1904static void gbe_slave_stop(struct gbe_intf *intf) 1920static void gbe_slave_stop(struct gbe_intf *intf)
1905{ 1921{
1906 struct gbe_priv *gbe_dev = intf->gbe_dev; 1922 struct gbe_priv *gbe_dev = intf->gbe_dev;
1907 struct gbe_slave *slave = intf->slave; 1923 struct gbe_slave *slave = intf->slave;
1908 1924
1925 gbe_sgmii_rtreset(gbe_dev, slave, true);
1909 gbe_port_reset(slave); 1926 gbe_port_reset(slave);
1910 /* Disable forwarding */ 1927 /* Disable forwarding */
1911 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1928 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
1947 1964
1948 gbe_sgmii_config(priv, slave); 1965 gbe_sgmii_config(priv, slave);
1949 gbe_port_reset(slave); 1966 gbe_port_reset(slave);
1967 gbe_sgmii_rtreset(priv, slave, false);
1950 gbe_port_config(priv, slave, priv->rx_packet_max); 1968 gbe_port_config(priv, slave, priv->rx_packet_max);
1951 gbe_set_slave_mac(slave, gbe_intf); 1969 gbe_set_slave_mac(slave, gbe_intf);
1952 /* enable forwarding */ 1970 /* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
2490{ 2508{
2491 struct gbe_slave *slave; 2509 struct gbe_slave *slave;
2492 2510
2493 for (;;) { 2511 while (!list_empty(&gbe_dev->secondary_slaves)) {
2494 slave = first_sec_slave(gbe_dev); 2512 slave = first_sec_slave(gbe_dev);
2495 if (!slave) 2513
2496 break;
2497 if (slave->phy) 2514 if (slave->phy)
2498 phy_disconnect(slave->phy); 2515 phy_disconnect(slave->phy);
2499 list_del(&slave->slave_list); 2516 list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2839 &gbe_dev->dma_chan_name); 2856 &gbe_dev->dma_chan_name);
2840 if (ret < 0) { 2857 if (ret < 0) {
2841 dev_err(dev, "missing \"tx-channel\" parameter\n"); 2858 dev_err(dev, "missing \"tx-channel\" parameter\n");
2842 ret = -ENODEV; 2859 return -EINVAL;
2843 goto quit;
2844 } 2860 }
2845 2861
2846 if (!strcmp(node->name, "gbe")) { 2862 if (!strcmp(node->name, "gbe")) {
2847 ret = get_gbe_resource_version(gbe_dev, node); 2863 ret = get_gbe_resource_version(gbe_dev, node);
2848 if (ret) 2864 if (ret)
2849 goto quit; 2865 return ret;
2850 2866
2851 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); 2867 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2852 2868
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2857 else 2873 else
2858 ret = -ENODEV; 2874 ret = -ENODEV;
2859 2875
2860 if (ret)
2861 goto quit;
2862 } else if (!strcmp(node->name, "xgbe")) { 2876 } else if (!strcmp(node->name, "xgbe")) {
2863 ret = set_xgbe_ethss10_priv(gbe_dev, node); 2877 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2864 if (ret) 2878 if (ret)
2865 goto quit; 2879 return ret;
2866 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 2880 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2867 gbe_dev->ss_regs); 2881 gbe_dev->ss_regs);
2868 if (ret)
2869 goto quit;
2870 } else { 2882 } else {
2871 dev_err(dev, "unknown GBE node(%s)\n", node->name); 2883 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2872 ret = -ENODEV; 2884 ret = -ENODEV;
2873 goto quit;
2874 } 2885 }
2875 2886
2887 if (ret)
2888 return ret;
2889
2876 interfaces = of_get_child_by_name(node, "interfaces"); 2890 interfaces = of_get_child_by_name(node, "interfaces");
2877 if (!interfaces) 2891 if (!interfaces)
2878 dev_err(dev, "could not find interfaces\n"); 2892 dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2880 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 2894 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2881 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 2895 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2882 if (ret) 2896 if (ret)
2883 goto quit; 2897 return ret;
2884 2898
2885 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 2899 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2886 if (ret) 2900 if (ret)
2887 goto quit; 2901 return ret;
2888 2902
2889 /* Create network interfaces */ 2903 /* Create network interfaces */
2890 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 2904 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2899 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2913 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2900 break; 2914 break;
2901 } 2915 }
2916 of_node_put(interfaces);
2902 2917
2903 if (!gbe_dev->num_slaves) 2918 if (!gbe_dev->num_slaves)
2904 dev_warn(dev, "No network interface configured\n"); 2919 dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2911 of_node_put(secondary_ports); 2926 of_node_put(secondary_ports);
2912 2927
2913 if (!gbe_dev->num_slaves) { 2928 if (!gbe_dev->num_slaves) {
2914 dev_err(dev, "No network interface or secondary ports configured\n"); 2929 dev_err(dev,
2930 "No network interface or secondary ports configured\n");
2915 ret = -ENODEV; 2931 ret = -ENODEV;
2916 goto quit; 2932 goto free_sec_ports;
2917 } 2933 }
2918 2934
2919 memset(&ale_params, 0, sizeof(ale_params)); 2935 memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2927 if (!gbe_dev->ale) { 2943 if (!gbe_dev->ale) {
2928 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 2944 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2929 ret = -ENODEV; 2945 ret = -ENODEV;
2930 goto quit; 2946 goto free_sec_ports;
2931 } else { 2947 } else {
2932 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 2948 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2933 } 2949 }
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2943 *inst_priv = gbe_dev; 2959 *inst_priv = gbe_dev;
2944 return 0; 2960 return 0;
2945 2961
2946quit: 2962free_sec_ports:
2947 if (gbe_dev->hw_stats) 2963 free_secondary_ports(gbe_dev);
2948 devm_kfree(dev, gbe_dev->hw_stats);
2949 cpsw_ale_destroy(gbe_dev->ale);
2950 if (gbe_dev->ss_regs)
2951 devm_iounmap(dev, gbe_dev->ss_regs);
2952 of_node_put(interfaces);
2953 devm_kfree(dev, gbe_dev);
2954 return ret; 2964 return ret;
2955} 2965}
2956 2966
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3023 free_secondary_ports(gbe_dev); 3033 free_secondary_ports(gbe_dev);
3024 3034
3025 if (!list_empty(&gbe_dev->gbe_intf_head)) 3035 if (!list_empty(&gbe_dev->gbe_intf_head))
3026 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); 3036 dev_alert(gbe_dev->dev,
3037 "unreleased ethss interfaces present\n");
3027 3038
3028 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3029 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3030 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3031 devm_kfree(gbe_dev->dev, gbe_dev);
3032 return 0; 3039 return 0;
3033} 3040}
3034 3041
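
The gbe_probe()/gbe_remove() hunks above drop the old "quit" label because every allocation and mapping there is device-managed: devm_kzalloc()/devm_ioremap() resources are released automatically when probe fails or the device is removed, so the manual devm_kfree()/devm_iounmap() calls were redundant. Only the secondary-port list still needs explicit teardown, hence the single free_sec_ports label. A minimal sketch of that pattern, with hypothetical names (my_probe, set_up_extra_state, register_something), not the netcp code:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct my_priv {
		void __iomem *regs;
	};

	/* hypothetical helpers standing in for driver-specific setup/teardown */
	static int set_up_extra_state(struct my_priv *priv) { return 0; }
	static void free_extra_state(struct my_priv *priv) { }
	static int register_something(struct my_priv *priv) { return 0; }

	static int my_probe(struct platform_device *pdev)
	{
		struct my_priv *priv;
		struct resource *res;
		int ret;

		/* devm_* allocations are released automatically if probe fails,
		 * so the error paths below never free them by hand.
		 */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		priv->regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->regs))
			return PTR_ERR(priv->regs);

		ret = set_up_extra_state(priv);		/* non-devm resource */
		if (ret)
			return ret;

		ret = register_something(priv);
		if (ret)
			goto free_extras;	/* unwind only the non-devm state */

		return 0;

	free_extras:
		free_extra_state(priv);
		return ret;
	}
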
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
18 18
19#include "netcp.h" 19#include "netcp.h"
20 20
21#define SGMII_SRESET_RESET BIT(0)
22#define SGMII_SRESET_RTRESET BIT(1)
23
21#define SGMII_REG_STATUS_LOCK BIT(4) 24#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0) 25#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2) 26#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
51int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) 54int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
52{ 55{
53 /* Soft reset */ 56 /* Soft reset */
54 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); 57 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
55 while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) 58 SGMII_SRESET_RESET);
59
60 while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
61 SGMII_SRESET_RESET) != 0x0)
56 ; 62 ;
63
57 return 0; 64 return 0;
58} 65}
59 66
67/* port is 0 based */
68bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
69{
70 u32 reg;
71 bool oldval;
72
73 /* Initiate a soft reset */
74 reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
75 oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
76 if (set)
77 reg |= SGMII_SRESET_RTRESET;
78 else
79 reg &= ~SGMII_SRESET_RTRESET;
80 sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
81 wmb();
82
83 return oldval;
84}
85
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) 86int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{ 87{
62 u32 status = 0, link = 0; 88 u32 status = 0, link = 0;
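
With the new SGMII_SRESET_RTRESET bit sharing the soft-reset register, netcp_sgmii_reset() can no longer wait for the whole register to read zero; it masks the readback with SGMII_SRESET_RESET so a latched RT-reset bit does not stall the loop. A minimal sketch of that mask-and-poll idiom, with an iteration bound added purely for illustration (the bound and names are assumptions, not the driver's code):

	#include <linux/bitops.h>
	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	#define MY_SRESET_RESET	BIT(0)	/* self-clearing soft-reset bit */

	/* Poll until the soft-reset bit clears, ignoring other latched bits. */
	static int my_wait_reset_done(void __iomem *reg)
	{
		int tries = 1000;	/* assumed bound, not from the driver */

		while (readl(reg) & MY_SRESET_RESET) {
			if (--tries == 0)
				return -ETIMEDOUT;
			udelay(1);
		}
		return 0;
	}
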
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4208dd7ef101..d95f9aae95e7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1530,9 +1530,9 @@ static int axienet_probe(struct platform_device *pdev)
1530 /* Map device registers */ 1530 /* Map device registers */
1531 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1531 ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1532 lp->regs = devm_ioremap_resource(&pdev->dev, ethres); 1532 lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
1533 if (!lp->regs) { 1533 if (IS_ERR(lp->regs)) {
1534 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n"); 1534 dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
1535 ret = -ENOMEM; 1535 ret = PTR_ERR(lp->regs);
1536 goto free_netdev; 1536 goto free_netdev;
1537 } 1537 }
1538 1538
@@ -1599,9 +1599,9 @@ static int axienet_probe(struct platform_device *pdev)
1599 goto free_netdev; 1599 goto free_netdev;
1600 } 1600 }
1601 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); 1601 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
1602 if (!lp->dma_regs) { 1602 if (IS_ERR(lp->dma_regs)) {
1603 dev_err(&pdev->dev, "could not map DMA regs\n"); 1603 dev_err(&pdev->dev, "could not map DMA regs\n");
1604 ret = -ENOMEM; 1604 ret = PTR_ERR(lp->dma_regs);
1605 goto free_netdev; 1605 goto free_netdev;
1606 } 1606 }
1607 lp->rx_irq = irq_of_parse_and_map(np, 1); 1607 lp->rx_irq = irq_of_parse_and_map(np, 1);
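
devm_ioremap_resource() never returns NULL on failure; it returns an ERR_PTR()-encoded error, so the NULL checks replaced above silently accepted failed mappings. The fix uses the IS_ERR()/PTR_ERR() idiom, sketched below as a standalone helper with hypothetical names:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	/* Hypothetical helper: map the first MEM resource, or report an errno. */
	static void __iomem *my_map_regs(struct platform_device *pdev, int *errp)
	{
		struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		void __iomem *regs = devm_ioremap_resource(&pdev->dev, res);

		/* failure is encoded as ERR_PTR(), never NULL */
		if (IS_ERR(regs)) {
			*errp = PTR_ERR(regs);	/* e.g. -EBUSY or -ENOMEM */
			return NULL;
		}
		*errp = 0;
		return regs;
	}
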
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 7856b6ccf5c5..d95a50ae996d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -482,6 +482,7 @@ static void bpq_setup(struct net_device *dev)
482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN); 482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
483 483
484 dev->flags = 0; 484 dev->flags = 0;
485 dev->features = NETIF_F_LLTX; /* Allow recursion */
485 486
486#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 487#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
487 dev->header_ops = &ax25_header_ops; 488 dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
728 dev->type = ARPHRD_AX25; 728 dev->type = ARPHRD_AX25;
729 729
730 /* Perform the low-level AX25 initialization. */ 730 /* Perform the low-level AX25 initialization. */
731 if ((err = ax_open(ax->dev))) { 731 err = ax_open(ax->dev);
732 if (err)
732 goto out_free_netdev; 733 goto out_free_netdev;
733 }
734 734
735 if (register_netdev(dev)) 735 err = register_netdev(dev);
736 if (err)
736 goto out_free_buffers; 737 goto out_free_buffers;
737 738
738 /* after register_netdev() - because else printk smashes the kernel */ 739 /* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
67 struct ipvl_port *port; 67 struct ipvl_port *port;
68 struct net_device *phy_dev; 68 struct net_device *phy_dev;
69 struct list_head addrs; 69 struct list_head addrs;
70 int ipv4cnt;
71 int ipv6cnt;
72 struct ipvl_pcpu_stats __percpu *pcpu_stats; 70 struct ipvl_pcpu_stats __percpu *pcpu_stats;
73 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
74 netdev_features_t sfeatures; 72 netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
106 return rcu_dereference(d->rx_handler_data); 104 return rcu_dereference(d->rx_handler_data);
107} 105}
108 106
107static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
108{
109 return rcu_dereference_bh(d->rx_handler_data);
110}
111
109static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) 112static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
110{ 113{
111 return rtnl_dereference(d->rx_handler_data); 114 return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
124bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); 127bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
125struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 128struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
126 const void *iaddr, bool is_v6); 129 const void *iaddr, bool is_v6);
127void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 130void ipvlan_ht_addr_del(struct ipvl_addr *addr);
128#endif /* __IPVLAN_H */ 131#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
86} 86}
87 87
88void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88void ipvlan_ht_addr_del(struct ipvl_addr *addr)
89{ 89{
90 hlist_del_init_rcu(&addr->hlnode); 90 hlist_del_init_rcu(&addr->hlnode);
91 if (sync)
92 synchronize_rcu();
93} 91}
94 92
95struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, 93struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
531int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 529int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
532{ 530{
533 struct ipvl_dev *ipvlan = netdev_priv(dev); 531 struct ipvl_dev *ipvlan = netdev_priv(dev);
534 struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); 532 struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
535 533
536 if (!port) 534 if (!port)
537 goto out; 535 goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
153 else 153 else
154 dev->flags &= ~IFF_NOARP; 154 dev->flags &= ~IFF_NOARP;
155 155
156 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 156 list_for_each_entry(addr, &ipvlan->addrs, anode)
157 list_for_each_entry(addr, &ipvlan->addrs, anode) 157 ipvlan_ht_addr_add(ipvlan, addr);
158 ipvlan_ht_addr_add(ipvlan, addr); 158
159 }
160 return dev_uc_add(phy_dev, phy_dev->dev_addr); 159 return dev_uc_add(phy_dev, phy_dev->dev_addr);
161} 160}
162 161
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
171 170
172 dev_uc_del(phy_dev, phy_dev->dev_addr); 171 dev_uc_del(phy_dev, phy_dev->dev_addr);
173 172
174 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 173 list_for_each_entry(addr, &ipvlan->addrs, anode)
175 list_for_each_entry(addr, &ipvlan->addrs, anode) 174 ipvlan_ht_addr_del(addr);
176 ipvlan_ht_addr_del(addr, !dev->dismantle); 175
177 }
178 return 0; 176 return 0;
179} 177}
180 178
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
471 ipvlan->port = port; 469 ipvlan->port = port;
472 ipvlan->sfeatures = IPVLAN_FEATURES; 470 ipvlan->sfeatures = IPVLAN_FEATURES;
473 INIT_LIST_HEAD(&ipvlan->addrs); 471 INIT_LIST_HEAD(&ipvlan->addrs);
474 ipvlan->ipv4cnt = 0;
475 ipvlan->ipv6cnt = 0;
476 472
477 /* TODO Probably put random address here to be presented to the 473 /* TODO Probably put random address here to be presented to the
478 * world but keep using the physical-dev address for the outgoing 474 * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
508 struct ipvl_dev *ipvlan = netdev_priv(dev); 504 struct ipvl_dev *ipvlan = netdev_priv(dev);
509 struct ipvl_addr *addr, *next; 505 struct ipvl_addr *addr, *next;
510 506
511 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 507 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
512 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 508 ipvlan_ht_addr_del(addr);
513 ipvlan_ht_addr_del(addr, !dev->dismantle); 509 list_del(&addr->anode);
514 list_del(&addr->anode); 510 kfree_rcu(addr, rcu);
515 }
516 } 511 }
512
517 list_del_rcu(&ipvlan->pnode); 513 list_del_rcu(&ipvlan->pnode);
518 unregister_netdevice_queue(dev, head); 514 unregister_netdevice_queue(dev, head);
519 netdev_upper_dev_unlink(ipvlan->phy_dev, dev); 515 netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 623 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
628 addr->atype = IPVL_IPV6; 624 addr->atype = IPVL_IPV6;
629 list_add_tail(&addr->anode, &ipvlan->addrs); 625 list_add_tail(&addr->anode, &ipvlan->addrs);
630 ipvlan->ipv6cnt++; 626
631 /* If the interface is not up, the address will be added to the hash 627 /* If the interface is not up, the address will be added to the hash
632 * list by ipvlan_open. 628 * list by ipvlan_open.
633 */ 629 */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
645 if (!addr) 641 if (!addr)
646 return; 642 return;
647 643
648 ipvlan_ht_addr_del(addr, true); 644 ipvlan_ht_addr_del(addr);
649 list_del(&addr->anode); 645 list_del(&addr->anode);
650 ipvlan->ipv6cnt--;
651 WARN_ON(ipvlan->ipv6cnt < 0);
652 kfree_rcu(addr, rcu); 646 kfree_rcu(addr, rcu);
653 647
654 return; 648 return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
661 struct net_device *dev = (struct net_device *)if6->idev->dev; 655 struct net_device *dev = (struct net_device *)if6->idev->dev;
662 struct ipvl_dev *ipvlan = netdev_priv(dev); 656 struct ipvl_dev *ipvlan = netdev_priv(dev);
663 657
658 /* FIXME IPv6 autoconf calls us from bh without RTNL */
659 if (in_softirq())
660 return NOTIFY_DONE;
661
664 if (!netif_is_ipvlan(dev)) 662 if (!netif_is_ipvlan(dev))
665 return NOTIFY_DONE; 663 return NOTIFY_DONE;
666 664
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 697 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
700 addr->atype = IPVL_IPV4; 698 addr->atype = IPVL_IPV4;
701 list_add_tail(&addr->anode, &ipvlan->addrs); 699 list_add_tail(&addr->anode, &ipvlan->addrs);
702 ipvlan->ipv4cnt++; 700
703 /* If the interface is not up, the address will be added to the hash 701 /* If the interface is not up, the address will be added to the hash
704 * list by ipvlan_open. 702 * list by ipvlan_open.
705 */ 703 */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
717 if (!addr) 715 if (!addr)
718 return; 716 return;
719 717
720 ipvlan_ht_addr_del(addr, true); 718 ipvlan_ht_addr_del(addr);
721 list_del(&addr->anode); 719 list_del(&addr->anode);
722 ipvlan->ipv4cnt--;
723 WARN_ON(ipvlan->ipv4cnt < 0);
724 kfree_rcu(addr, rcu); 720 kfree_rcu(addr, rcu);
725 721
726 return; 722 return;
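
The ipvlan changes above have a common theme: ipvlan_queue_xmit() runs in the transmit path with bottom halves disabled, so the port pointer must be fetched with rcu_dereference_bh() (wrapped by the new ipvlan_port_get_rcu_bh() helper); the ipv4cnt/ipv6cnt counters are dropped because walking the possibly empty address list gives the same behaviour without bookkeeping; and ipvlan_ht_addr_del() loses its synchronize_rcu() since the callers already defer freeing with kfree_rcu(). A minimal sketch of the BH-flavoured RCU accessor pattern, with hypothetical structures:

	#include <linux/netdevice.h>
	#include <linux/rcupdate.h>
	#include <linux/skbuff.h>

	struct my_port { int id; };			/* hypothetical */

	static void my_forward(struct my_port *port, struct sk_buff *skb)
	{
		kfree_skb(skb);		/* hypothetical: hand off to lower device */
	}

	/* Transmit handler: runs with bottom halves disabled, so the BH-flavoured
	 * RCU primitives are the ones whose lockdep checks match this context.
	 */
	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct my_port *port;

		rcu_read_lock_bh();
		port = rcu_dereference_bh(dev->rx_handler_data);
		if (port)
			my_forward(port, skb);
		else
			kfree_skb(skb);
		rcu_read_unlock_bh();
		return NETDEV_TX_OK;
	}
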
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index f8370808a018..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
719 struct virtio_net_hdr vnet_hdr = { 0 }; 719 struct virtio_net_hdr vnet_hdr = { 0 };
720 int vnet_hdr_len = 0; 720 int vnet_hdr_len = 0;
721 int copylen = 0; 721 int copylen = 0;
722 int depth;
722 bool zerocopy = false; 723 bool zerocopy = false;
723 size_t linear; 724 size_t linear;
724 ssize_t n; 725 ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
804 805
805 skb_probe_transport_header(skb, ETH_HLEN); 806 skb_probe_transport_header(skb, ETH_HLEN);
806 807
808 /* Move network header to the right position for VLAN tagged packets */
809 if ((skb->protocol == htons(ETH_P_8021Q) ||
810 skb->protocol == htons(ETH_P_8021AD)) &&
811 __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
812 skb_set_network_header(skb, depth);
813
807 rcu_read_lock(); 814 rcu_read_lock();
808 vlan = rcu_dereference(q->vlan); 815 vlan = rcu_dereference(q->vlan);
809 /* copy skb_ubuf_info for callback when skb has no error */ 816 /* copy skb_ubuf_info for callback when skb has no error */
@@ -1355,6 +1362,7 @@ static void macvtap_exit(void)
1355 class_unregister(macvtap_class); 1362 class_unregister(macvtap_class);
1356 cdev_del(&macvtap_cdev); 1363 cdev_del(&macvtap_cdev);
1357 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS); 1364 unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
1365 idr_destroy(&minor_idr);
1358} 1366}
1359module_exit(macvtap_exit); 1367module_exit(macvtap_exit);
1360 1368
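
For 802.1Q/802.1AD frames coming from userspace, skb_probe_transport_header() leaves the network header pointing at the VLAN tag; the block added to macvtap_get_user() asks __vlan_get_protocol() how deep the tag stack is and moves the network header past it. A minimal standalone sketch of that call pattern (not the macvtap code):

	#include <linux/if_ether.h>
	#include <linux/if_vlan.h>
	#include <linux/skbuff.h>

	/* Reposition the network header behind any VLAN tags on the frame. */
	static void my_fix_network_header(struct sk_buff *skb)
	{
		int depth;

		if ((skb->protocol == htons(ETH_P_8021Q) ||
		     skb->protocol == htons(ETH_P_8021AD)) &&
		    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
			skb_set_network_header(skb, depth);
	}
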
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
102 102
103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); 103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
104 104
105 if (len < 0) {
106 ndev->stats.rx_errors++;
107 ndev->stats.rx_length_errors++;
108 goto enqueue_again;
109 }
110
105 skb_put(skb, len); 111 skb_put(skb, len);
106 skb->protocol = eth_type_trans(skb, ndev); 112 skb->protocol = eth_type_trans(skb, ndev);
107 skb->ip_summed = CHECKSUM_NONE; 113 skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
121 return; 127 return;
122 } 128 }
123 129
130enqueue_again:
124 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); 131 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
125 if (rc) { 132 if (rc) {
126 dev_kfree_skb(skb); 133 dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
184 191
185 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 192 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
186 ndev->mtu + ETH_HLEN); 193 ndev->mtu + ETH_HLEN);
187 if (rc == -EINVAL) { 194 if (rc) {
188 dev_kfree_skb(skb); 195 dev_kfree_skb(skb);
189 goto err; 196 goto err;
190 } 197 }
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cf18940f4e84..cb86d7a01542 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -191,7 +191,7 @@ config MDIO_BUS_MUX_GPIO
191 191
192config MDIO_BUS_MUX_MMIOREG 192config MDIO_BUS_MUX_MMIOREG
193 tristate "Support for MMIO device-controlled MDIO bus multiplexers" 193 tristate "Support for MMIO device-controlled MDIO bus multiplexers"
194 depends on OF_MDIO 194 depends on OF_MDIO && HAS_IOMEM
195 select MDIO_BUS_MUX 195 select MDIO_BUS_MUX
196 help 196 help
197 This module provides a driver for MDIO bus multiplexers that 197 This module provides a driver for MDIO bus multiplexers that
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
164 return ret; 164 return ret;
165 } 165 }
166 166
167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || 167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { 168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, 169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
170 DP83867_DEVADDR, phydev->addr); 170 DP83867_DEVADDR, phydev->addr);
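
The dp83867 one-liner swaps || for &&: since PHY_INTERFACE_MODE_RGMII_ID is greater than the modes below it and less than RGMII_RXID, "iface >= RGMII_ID || iface <= RGMII_RXID" is true for every interface mode, so the RGMII delay registers were being written even outside RGMII operation. A range test needs both bounds, as in this hedged helper (illustrative only):

	#include <linux/phy.h>

	/* True only for interface modes in the [RGMII_ID, RGMII_RXID] range. */
	static bool my_is_rgmii_with_delay(struct phy_device *phydev)
	{
		return phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID &&
		       phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID;
	}
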
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
421{ 421{
422 struct phy_device *phydev = to_phy_device(dev); 422 struct phy_device *phydev = to_phy_device(dev);
423 struct phy_driver *phydrv = to_phy_driver(drv); 423 struct phy_driver *phydrv = to_phy_driver(drv);
424 const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
425 int i;
424 426
425 if (of_driver_match_device(dev, drv)) 427 if (of_driver_match_device(dev, drv))
426 return 1; 428 return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
428 if (phydrv->match_phy_device) 430 if (phydrv->match_phy_device)
429 return phydrv->match_phy_device(phydev); 431 return phydrv->match_phy_device(phydev);
430 432
431 return (phydrv->phy_id & phydrv->phy_id_mask) == 433 if (phydev->is_c45) {
432 (phydev->phy_id & phydrv->phy_id_mask); 434 for (i = 1; i < num_ids; i++) {
435 if (!(phydev->c45_ids.devices_in_package & (1 << i)))
436 continue;
437
438 if ((phydrv->phy_id & phydrv->phy_id_mask) ==
439 (phydev->c45_ids.device_ids[i] &
440 phydrv->phy_id_mask))
441 return 1;
442 }
443 return 0;
444 } else {
445 return (phydrv->phy_id & phydrv->phy_id_mask) ==
446 (phydev->phy_id & phydrv->phy_id_mask);
447 }
433} 448}
434 449
435#ifdef CONFIG_PM 450#ifdef CONFIG_PM
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b506acb..1e1fbb049ec6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
811 bool needs_aneg = false, do_suspend = false; 811 bool needs_aneg = false, do_suspend = false;
812 enum phy_state old_state; 812 enum phy_state old_state;
813 int err = 0; 813 int err = 0;
814 int old_link;
814 815
815 mutex_lock(&phydev->lock); 816 mutex_lock(&phydev->lock);
816 817
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
896 phydev->adjust_link(phydev->attached_dev); 897 phydev->adjust_link(phydev->attached_dev);
897 break; 898 break;
898 case PHY_RUNNING: 899 case PHY_RUNNING:
899 /* Only register a CHANGE if we are 900 /* Only register a CHANGE if we are polling or ignoring
900 * polling or ignoring interrupts 901 * interrupts and link changed since latest checking.
901 */ 902 */
902 if (!phy_interrupt_is_valid(phydev)) 903 if (!phy_interrupt_is_valid(phydev)) {
903 phydev->state = PHY_CHANGELINK; 904 old_link = phydev->link;
905 err = phy_read_status(phydev);
906 if (err)
907 break;
908
909 if (old_link != phydev->link)
910 phydev->state = PHY_CHANGELINK;
911 }
904 break; 912 break;
905 case PHY_CHANGELINK: 913 case PHY_CHANGELINK:
906 err = phy_read_status(phydev); 914 err = phy_read_status(phydev);
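
In the PHY_RUNNING case the state machine used to move to PHY_CHANGELINK on every poll when interrupts were not in use; it now reads the status first and only transitions when the link state actually changed, avoiding spurious link events and the adjust_link churn they cause. A hedged sketch of that edge-detect pattern:

	#include <linux/phy.h>

	/* Poll-mode handling of the RUNNING state: report only real transitions. */
	static int my_poll_running(struct phy_device *phydev)
	{
		int old_link = phydev->link;
		int err = phy_read_status(phydev);	/* refreshes phydev->link */

		if (err)
			return err;

		if (old_link != phydev->link)
			phydev->state = PHY_CHANGELINK;	/* edge detected */
		return 0;
	}
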
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e19d4..70b08958763a 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
91} 91}
92 92
93/* 93/*
94 * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each 94 * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
95 * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner 95 * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
96 * does send the pulses within this interval, the PHY will remained powered 96 * unstable detection of plugging in Ethernet cable.
97 * down. 97 * This workaround disables Energy Detect Power-Down mode and waiting for
98 * 98 * response on link pulses to detect presence of plugged Ethernet cable.
99 * This workaround will manually toggle the PHY on/off upon calls to read_status 99 * The Energy Detect Power-Down mode is enabled again in the end of procedure to
100 * in order to generate link test pulses if the link is down. If a link partner 100 * save approximately 220 mW of power if cable is unplugged.
101 * is present, it will respond to the pulses, which will cause the ENERGYON bit
102 * to be set and will cause the EDPD mode to be exited.
103 */ 101 */
104static int lan87xx_read_status(struct phy_device *phydev) 102static int lan87xx_read_status(struct phy_device *phydev)
105{ 103{
106 int err = genphy_read_status(phydev); 104 int err = genphy_read_status(phydev);
105 int i;
107 106
108 if (!phydev->link) { 107 if (!phydev->link) {
109 /* Disable EDPD to wake up PHY */ 108 /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
116 if (rc < 0) 115 if (rc < 0)
117 return rc; 116 return rc;
118 117
119 /* Sleep 64 ms to allow ~5 link test pulses to be sent */ 118 /* Wait max 640 ms to detect energy */
120 msleep(64); 119 for (i = 0; i < 64; i++) {
120 /* Sleep to allow link test pulses to be sent */
121 msleep(10);
122 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
123 if (rc < 0)
124 return rc;
125 if (rc & MII_LAN83C185_ENERGYON)
126 break;
127 }
121 128
122 /* Re-enable EDPD */ 129 /* Re-enable EDPD */
123 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 130 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
191 198
192 /* basic functions */ 199 /* basic functions */
193 .config_aneg = genphy_config_aneg, 200 .config_aneg = genphy_config_aneg,
194 .read_status = genphy_read_status, 201 .read_status = lan87xx_read_status,
195 .config_init = smsc_phy_config_init, 202 .config_init = smsc_phy_config_init,
196 .soft_reset = smsc_phy_reset, 203 .soft_reset = smsc_phy_reset,
197 204
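
The smsc workaround replaces the fixed msleep(64) with a loop that polls the control/status register up to 64 times, 10 ms apart, and stops as soon as ENERGYON is seen, so the common cable-present case no longer pays the full wait; lan87xx_read_status() is also wired up as the .read_status hook for the affected parts. A minimal sketch of that bounded poll, with assumed register and bit values standing in for the real MII_LAN83C185 definitions:

	#include <linux/bitops.h>
	#include <linux/delay.h>
	#include <linux/phy.h>

	#define MY_CTRL_STATUS	17		/* assumed register number */
	#define MY_ENERGYON	BIT(1)		/* assumed bit position */

	/* Wait up to ~640 ms for energy detect, returning early on success. */
	static int my_wait_energyon(struct phy_device *phydev)
	{
		int i, rc;

		for (i = 0; i < 64; i++) {
			msleep(10);
			rc = phy_read(phydev, MY_CTRL_STATUS);
			if (rc < 0)
				return rc;
			if (rc & MY_ENERGYON)
				break;
		}
		return 0;
	}
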
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566521a7..fa8f5046afe9 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
269static void ppp_ccp_closed(struct ppp *ppp); 269static void ppp_ccp_closed(struct ppp *ppp);
270static struct compressor *find_compressor(int type); 270static struct compressor *find_compressor(int type);
271static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); 271static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
272static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); 272static struct ppp *ppp_create_interface(struct net *net, int unit,
273 struct file *file, int *retp);
273static void init_ppp_file(struct ppp_file *pf, int kind); 274static void init_ppp_file(struct ppp_file *pf, int kind);
274static void ppp_shutdown_interface(struct ppp *ppp);
275static void ppp_destroy_interface(struct ppp *ppp); 275static void ppp_destroy_interface(struct ppp *ppp);
276static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); 276static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
277static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); 277static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
392 file->private_data = NULL; 392 file->private_data = NULL;
393 if (pf->kind == INTERFACE) { 393 if (pf->kind == INTERFACE) {
394 ppp = PF_TO_PPP(pf); 394 ppp = PF_TO_PPP(pf);
395 rtnl_lock();
395 if (file == ppp->owner) 396 if (file == ppp->owner)
396 ppp_shutdown_interface(ppp); 397 unregister_netdevice(ppp->dev);
398 rtnl_unlock();
397 } 399 }
398 if (atomic_dec_and_test(&pf->refcnt)) { 400 if (atomic_dec_and_test(&pf->refcnt)) {
399 switch (pf->kind) { 401 switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
593 mutex_lock(&ppp_mutex); 595 mutex_lock(&ppp_mutex);
594 if (pf->kind == INTERFACE) { 596 if (pf->kind == INTERFACE) {
595 ppp = PF_TO_PPP(pf); 597 ppp = PF_TO_PPP(pf);
598 rtnl_lock();
596 if (file == ppp->owner) 599 if (file == ppp->owner)
597 ppp_shutdown_interface(ppp); 600 unregister_netdevice(ppp->dev);
601 rtnl_unlock();
598 } 602 }
599 if (atomic_long_read(&file->f_count) < 2) { 603 if (atomic_long_read(&file->f_count) < 2) {
600 ppp_release(NULL, file); 604 ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
838 /* Create a new ppp unit */ 842 /* Create a new ppp unit */
839 if (get_user(unit, p)) 843 if (get_user(unit, p))
840 break; 844 break;
841 ppp = ppp_create_interface(net, unit, &err); 845 ppp = ppp_create_interface(net, unit, file, &err);
842 if (!ppp) 846 if (!ppp)
843 break; 847 break;
844 file->private_data = &ppp->file; 848 file->private_data = &ppp->file;
845 ppp->owner = file;
846 err = -EFAULT; 849 err = -EFAULT;
847 if (put_user(ppp->file.index, p)) 850 if (put_user(ppp->file.index, p))
848 break; 851 break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
916static __net_exit void ppp_exit_net(struct net *net) 919static __net_exit void ppp_exit_net(struct net *net)
917{ 920{
918 struct ppp_net *pn = net_generic(net, ppp_net_id); 921 struct ppp_net *pn = net_generic(net, ppp_net_id);
922 struct ppp *ppp;
923 LIST_HEAD(list);
924 int id;
925
926 rtnl_lock();
927 idr_for_each_entry(&pn->units_idr, ppp, id)
928 unregister_netdevice_queue(ppp->dev, &list);
929
930 unregister_netdevice_many(&list);
931 rtnl_unlock();
919 932
920 idr_destroy(&pn->units_idr); 933 idr_destroy(&pn->units_idr);
921} 934}
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
1088 return 0; 1101 return 0;
1089} 1102}
1090 1103
1104static void ppp_dev_uninit(struct net_device *dev)
1105{
1106 struct ppp *ppp = netdev_priv(dev);
1107 struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
1108
1109 ppp_lock(ppp);
1110 ppp->closing = 1;
1111 ppp_unlock(ppp);
1112
1113 mutex_lock(&pn->all_ppp_mutex);
1114 unit_put(&pn->units_idr, ppp->file.index);
1115 mutex_unlock(&pn->all_ppp_mutex);
1116
1117 ppp->owner = NULL;
1118
1119 ppp->file.dead = 1;
1120 wake_up_interruptible(&ppp->file.rwait);
1121}
1122
1091static const struct net_device_ops ppp_netdev_ops = { 1123static const struct net_device_ops ppp_netdev_ops = {
1092 .ndo_init = ppp_dev_init, 1124 .ndo_init = ppp_dev_init,
1125 .ndo_uninit = ppp_dev_uninit,
1093 .ndo_start_xmit = ppp_start_xmit, 1126 .ndo_start_xmit = ppp_start_xmit,
1094 .ndo_do_ioctl = ppp_net_ioctl, 1127 .ndo_do_ioctl = ppp_net_ioctl,
1095 .ndo_get_stats64 = ppp_get_stats64, 1128 .ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
2667 * or if there is already a unit with the requested number. 2700 * or if there is already a unit with the requested number.
2668 * unit == -1 means allocate a new number. 2701 * unit == -1 means allocate a new number.
2669 */ 2702 */
2670static struct ppp * 2703static struct ppp *ppp_create_interface(struct net *net, int unit,
2671ppp_create_interface(struct net *net, int unit, int *retp) 2704 struct file *file, int *retp)
2672{ 2705{
2673 struct ppp *ppp; 2706 struct ppp *ppp;
2674 struct ppp_net *pn; 2707 struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2688 ppp->mru = PPP_MRU; 2721 ppp->mru = PPP_MRU;
2689 init_ppp_file(&ppp->file, INTERFACE); 2722 init_ppp_file(&ppp->file, INTERFACE);
2690 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 2723 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
2724 ppp->owner = file;
2691 for (i = 0; i < NUM_NP; ++i) 2725 for (i = 0; i < NUM_NP; ++i)
2692 ppp->npmode[i] = NPMODE_PASS; 2726 ppp->npmode[i] = NPMODE_PASS;
2693 INIT_LIST_HEAD(&ppp->channels); 2727 INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
2776} 2810}
2777 2811
2778/* 2812/*
2779 * Take down a ppp interface unit - called when the owning file
2780 * (the one that created the unit) is closed or detached.
2781 */
2782static void ppp_shutdown_interface(struct ppp *ppp)
2783{
2784 struct ppp_net *pn;
2785
2786 pn = ppp_pernet(ppp->ppp_net);
2787 mutex_lock(&pn->all_ppp_mutex);
2788
2789 /* This will call dev_close() for us. */
2790 ppp_lock(ppp);
2791 if (!ppp->closing) {
2792 ppp->closing = 1;
2793 ppp_unlock(ppp);
2794 unregister_netdev(ppp->dev);
2795 unit_put(&pn->units_idr, ppp->file.index);
2796 } else
2797 ppp_unlock(ppp);
2798
2799 ppp->file.dead = 1;
2800 ppp->owner = NULL;
2801 wake_up_interruptible(&ppp->file.rwait);
2802
2803 mutex_unlock(&pn->all_ppp_mutex);
2804}
2805
2806/*
2807 * Free the memory used by a ppp unit. This is only called once 2813 * Free the memory used by a ppp unit. This is only called once
2808 * there are no channels connected to the unit and no file structs 2814 * there are no channels connected to the unit and no file structs
2809 * that reference the unit. 2815 * that reference the unit.
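
ppp_shutdown_interface() disappears because teardown now funnels through unregister_netdevice() under rtnl_lock(): the new ppp_dev_uninit() netdev op marks the unit closing, removes it from the per-net idr, clears the owner and wakes readers, so ppp_release(), the ioctl detach path and the new ppp_exit_net() loop (which batches any units left when a namespace dies) all share one path. A hedged sketch of batching unregistration from a pernet exit hook, with hypothetical types:

	#include <linux/idr.h>
	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	struct my_unit { struct net_device *dev; };	/* hypothetical */

	/* Tear down every unit still present when the namespace goes away. */
	static void my_exit_net(struct idr *units_idr)
	{
		struct my_unit *unit;
		LIST_HEAD(list);
		int id;

		rtnl_lock();
		idr_for_each_entry(units_idr, unit, id)
			unregister_netdevice_queue(unit->dev, &list);
		unregister_netdevice_many(&list);  /* one notification pass */
		rtnl_unlock();

		idr_destroy(units_idr);
	}
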
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4545e78840b0..35a2bffe848a 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -523,6 +523,7 @@ static const struct driver_info wwan_info = {
523#define REALTEK_VENDOR_ID 0x0bda 523#define REALTEK_VENDOR_ID 0x0bda
524#define SAMSUNG_VENDOR_ID 0x04e8 524#define SAMSUNG_VENDOR_ID 0x04e8
525#define LENOVO_VENDOR_ID 0x17ef 525#define LENOVO_VENDOR_ID 0x17ef
526#define NVIDIA_VENDOR_ID 0x0955
526 527
527static const struct usb_device_id products[] = { 528static const struct usb_device_id products[] = {
528/* BLACKLIST !! 529/* BLACKLIST !!
@@ -710,6 +711,13 @@ static const struct usb_device_id products[] = {
710 .driver_info = 0, 711 .driver_info = 0,
711}, 712},
712 713
714/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
715{
716 USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
717 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
718 .driver_info = 0,
719},
720
713/* WHITELIST!!! 721/* WHITELIST!!!
714 * 722 *
715 * CDC Ether uses two interfaces, not necessarily consecutive. 723 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index e4b7a47a825c..efc18e05af0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 158 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
159 goto err; 159 goto err;
160 160
161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting); 161 ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
162 if (ret) 162 if (ret)
163 goto err; 163 goto err;
164 164
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 8067b8fbb0ee..db40175b1a0b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -6,7 +6,7 @@
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com> 6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 * 7 *
8 * USB Host Driver for Network Control Model (NCM) 8 * USB Host Driver for Network Control Model (NCM)
9 * http://www.usb.org/developers/devclass_docs/NCM10.zip 9 * http://www.usb.org/developers/docs/devclass_docs/NCM10_012011.zip
10 * 10 *
11 * The NCM encoding, decoding and initialization logic 11 * The NCM encoding, decoding and initialization logic
12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h 12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
@@ -684,10 +684,12 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
684 ctx->tx_curr_skb = NULL; 684 ctx->tx_curr_skb = NULL;
685 } 685 }
686 686
687 kfree(ctx->delayed_ndp16);
688
687 kfree(ctx); 689 kfree(ctx);
688} 690}
689 691
690int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting) 692int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
691{ 693{
692 const struct usb_cdc_union_desc *union_desc = NULL; 694 const struct usb_cdc_union_desc *union_desc = NULL;
693 struct cdc_ncm_ctx *ctx; 695 struct cdc_ncm_ctx *ctx;
@@ -855,6 +857,17 @@ advance:
855 /* finish setting up the device specific data */ 857 /* finish setting up the device specific data */
856 cdc_ncm_setup(dev); 858 cdc_ncm_setup(dev);
857 859
860 /* Device-specific flags */
861 ctx->drvflags = drvflags;
862
863 /* Allocate the delayed NDP if needed. */
864 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
865 ctx->delayed_ndp16 = kzalloc(ctx->max_ndp_size, GFP_KERNEL);
866 if (!ctx->delayed_ndp16)
867 goto error2;
868 dev_info(&intf->dev, "NDP will be placed at end of frame for this device.");
869 }
870
858 /* override ethtool_ops */ 871 /* override ethtool_ops */
859 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; 872 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
860 873
@@ -954,8 +967,11 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
954 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) 967 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
955 return -ENODEV; 968 return -ENODEV;
956 969
957 /* The NCM data altsetting is fixed */ 970 /* The NCM data altsetting is fixed, so we hard-coded it.
958 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM); 971 * Additionally, generic NCM devices are assumed to accept arbitrarily
972 * placed NDP.
973 */
974 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
959 975
960 /* 976 /*
961 * We should get an event when network connection is "connected" or 977 * We should get an event when network connection is "connected" or
@@ -986,6 +1002,14 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
986 struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data; 1002 struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
987 size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex); 1003 size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
988 1004
1005 /* If NDP should be moved to the end of the NCM package, we can't follow the
1006 * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
1007 * the wNdpIndex field in the header is actually not consistent with reality. It will be later.
1008 */
1009 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
1010 if (ctx->delayed_ndp16->dwSignature == sign)
1011 return ctx->delayed_ndp16;
1012
989 /* follow the chain of NDPs, looking for a match */ 1013 /* follow the chain of NDPs, looking for a match */
990 while (ndpoffset) { 1014 while (ndpoffset) {
991 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); 1015 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -995,7 +1019,8 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
995 } 1019 }
996 1020
997 /* align new NDP */ 1021 /* align new NDP */
998 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); 1022 if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
1023 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
999 1024
1000 /* verify that there is room for the NDP and the datagram (reserve) */ 1025 /* verify that there is room for the NDP and the datagram (reserve) */
1001 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) 1026 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size)
@@ -1008,7 +1033,11 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
1008 nth16->wNdpIndex = cpu_to_le16(skb->len); 1033 nth16->wNdpIndex = cpu_to_le16(skb->len);
1009 1034
1010 /* push a new empty NDP */ 1035 /* push a new empty NDP */
1011 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); 1036 if (!(ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END))
1037 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size);
1038 else
1039 ndp16 = ctx->delayed_ndp16;
1040
1012 ndp16->dwSignature = sign; 1041 ndp16->dwSignature = sign;
1013 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); 1042 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
1014 return ndp16; 1043 return ndp16;
@@ -1023,6 +1052,15 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1023 struct sk_buff *skb_out; 1052 struct sk_buff *skb_out;
1024 u16 n = 0, index, ndplen; 1053 u16 n = 0, index, ndplen;
1025 u8 ready2send = 0; 1054 u8 ready2send = 0;
1055 u32 delayed_ndp_size;
1056
1057 /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated
1058 * accordingly. Otherwise, we should check here.
1059 */
1060 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
1061 delayed_ndp_size = ctx->max_ndp_size;
1062 else
1063 delayed_ndp_size = 0;
1026 1064
1027 /* if there is a remaining skb, it gets priority */ 1065 /* if there is a remaining skb, it gets priority */
1028 if (skb != NULL) { 1066 if (skb != NULL) {
@@ -1077,7 +1115,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1077 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); 1115 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
1078 1116
1079 /* check if we had enough room left for both NDP and frame */ 1117 /* check if we had enough room left for both NDP and frame */
1080 if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { 1118 if (!ndp16 || skb_out->len + skb->len + delayed_ndp_size > ctx->tx_max) {
1081 if (n == 0) { 1119 if (n == 0) {
1082 /* won't fit, MTU problem? */ 1120 /* won't fit, MTU problem? */
1083 dev_kfree_skb_any(skb); 1121 dev_kfree_skb_any(skb);
@@ -1150,6 +1188,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
1150 /* variables will be reset at next call */ 1188 /* variables will be reset at next call */
1151 } 1189 }
1152 1190
1191 /* If requested, put NDP at end of frame. */
1192 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
1193 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
1194 cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
1195 nth16->wNdpIndex = cpu_to_le16(skb_out->len);
1196 memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
1197
1198 /* Zero out delayed NDP - signature checking will naturally fail. */
1199 ndp16 = memset(ctx->delayed_ndp16, 0, ctx->max_ndp_size);
1200 }
1201
1153 /* If collected data size is less or equal ctx->min_tx_pkt 1202 /* If collected data size is less or equal ctx->min_tx_pkt
1154 * bytes, we send buffers as it is. If we get more data, it 1203 * bytes, we send buffers as it is. If we get more data, it
1155 * would be more efficient for USB HS mobile device with DMA 1204 * would be more efficient for USB HS mobile device with DMA
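
The new CDC_NCM_FLAG_NDP_TO_END flag makes the driver build the NDP16 in the preallocated ctx->delayed_ndp16 buffer instead of writing it right after the NTH16: datagrams are packed first, the fit check reserves room for the deferred NDP, and only when the frame is finalized is the NDP copied to the tail and wNdpIndex patched to point there (which is what the Huawei devices in the following hunk require). A minimal sketch of that "reserve now, append at the end" bookkeeping, with assumed names rather than the cdc_ncm structures:

	#include <linux/skbuff.h>
	#include <linux/string.h>

	/* Hypothetical frame builder keeping room for a trailing descriptor. */
	struct my_ctx {
		void	*delayed_desc;		/* preallocated descriptor image */
		u32	desc_size;
		bool	desc_at_end;
	};

	static bool my_fits(struct my_ctx *ctx, struct sk_buff *frame,
			    unsigned int datagram_len, unsigned int frame_max)
	{
		u32 reserve = ctx->desc_at_end ? ctx->desc_size : 0;

		/* account for the descriptor that will be appended later */
		return frame->len + datagram_len + reserve <= frame_max;
	}

	static void my_finalize(struct my_ctx *ctx, struct sk_buff *frame)
	{
		if (!ctx->desc_at_end)
			return;
		/* copy the descriptor to the tail; the header offset is patched
		 * separately to point at this position
		 */
		memcpy(skb_put(frame, ctx->desc_size), ctx->delayed_desc,
		       ctx->desc_size);
		memset(ctx->delayed_desc, 0, ctx->desc_size);	/* next frame */
	}
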
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 735f7dadb9a0..2680a65cd5e4 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -73,11 +73,14 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
73 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 73 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
74 int ret = -ENODEV; 74 int ret = -ENODEV;
75 struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data; 75 struct huawei_cdc_ncm_state *drvstate = (void *)&usbnet_dev->data;
76 int drvflags = 0;
76 77
77 /* altsetting should always be 1 for NCM devices - so we hard-coded 78 /* altsetting should always be 1 for NCM devices - so we hard-coded
78 * it here 79 * it here. Some huawei devices will need the NDP part of the NCM package to
80 * be at the end of the frame.
79 */ 81 */
80 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1); 82 drvflags |= CDC_NCM_FLAG_NDP_TO_END;
83 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
81 if (ret) 84 if (ret)
82 goto err; 85 goto err;
83 86
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..64a60afbe50c 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 761 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
761 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
762 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 763 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
@@ -784,6 +785,7 @@ static const struct usb_device_id products[] = {
784 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 785 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
785 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 786 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
786 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 787 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
788 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
787 {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ 789 {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
788 790
789 /* 4. Gobi 1000 devices */ 791 /* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index aafa1a1898e4..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28 28
29/* Version Information */ 29/* Version Information */
30#define DRIVER_VERSION "v1.08.0 (2015/01/13)" 30#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
33#define MODULENAME "r8152" 33#define MODULENAME "r8152"
@@ -494,6 +494,7 @@ enum rtl8152_flags {
494#define VENDOR_ID_REALTEK 0x0bda 494#define VENDOR_ID_REALTEK 0x0bda
495#define VENDOR_ID_SAMSUNG 0x04e8 495#define VENDOR_ID_SAMSUNG 0x04e8
496#define VENDOR_ID_LENOVO 0x17ef 496#define VENDOR_ID_LENOVO 0x17ef
497#define VENDOR_ID_NVIDIA 0x0955
497 498
498#define MCU_TYPE_PLA 0x0100 499#define MCU_TYPE_PLA 0x0100
499#define MCU_TYPE_USB 0x0000 500#define MCU_TYPE_USB 0x0000
@@ -1901,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
1901static void rtl8152_tx_timeout(struct net_device *netdev) 1902static void rtl8152_tx_timeout(struct net_device *netdev)
1902{ 1903{
1903 struct r8152 *tp = netdev_priv(netdev); 1904 struct r8152 *tp = netdev_priv(netdev);
1904 int i;
1905 1905
1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n"); 1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n");
1907 for (i = 0; i < RTL8152_MAX_TX; i++) 1907
1908 usb_unlink_urb(tp->tx_info[i].urb); 1908 usb_queue_reset_device(tp->intf);
1909} 1909}
1910 1910
1911static void rtl8152_set_rx_mode(struct net_device *netdev) 1911static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2074,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
2074{ 2074{
2075 int i, ret = 0; 2075 int i, ret = 0;
2076 2076
2077 napi_disable(&tp->napi);
2078 INIT_LIST_HEAD(&tp->rx_done); 2077 INIT_LIST_HEAD(&tp->rx_done);
2079 for (i = 0; i < RTL8152_MAX_RX; i++) { 2078 for (i = 0; i < RTL8152_MAX_RX; i++) {
2080 INIT_LIST_HEAD(&tp->rx_info[i].list); 2079 INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2082,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
2082 if (ret) 2081 if (ret)
2083 break; 2082 break;
2084 } 2083 }
2085 napi_enable(&tp->napi);
2086 2084
2087 if (ret && ++i < RTL8152_MAX_RX) { 2085 if (ret && ++i < RTL8152_MAX_RX) {
2088 struct list_head rx_queue; 2086 struct list_head rx_queue;
@@ -2165,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
2165 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2163 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2166 return -ENODEV; 2164 return -ENODEV;
2167 2165
2166 usb_disable_lpm(tp->udev);
2168 set_tx_qlen(tp); 2167 set_tx_qlen(tp);
2169 rtl_set_eee_plus(tp); 2168 rtl_set_eee_plus(tp);
2170 r8153_set_rx_early_timeout(tp); 2169 r8153_set_rx_early_timeout(tp);
@@ -2336,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2336 device_set_wakeup_enable(&tp->udev->dev, false); 2335 device_set_wakeup_enable(&tp->udev->dev, false);
2337} 2336}
2338 2337
2338static void r8153_u1u2en(struct r8152 *tp, bool enable)
2339{
2340 u8 u1u2[8];
2341
2342 if (enable)
2343 memset(u1u2, 0xff, sizeof(u1u2));
2344 else
2345 memset(u1u2, 0x00, sizeof(u1u2));
2346
2347 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2348}
2349
2350static void r8153_u2p3en(struct r8152 *tp, bool enable)
2351{
2352 u32 ocp_data;
2353
2354 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2355 if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
2356 ocp_data |= U2P3_ENABLE;
2357 else
2358 ocp_data &= ~U2P3_ENABLE;
2359 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2360}
2361
2362static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2363{
2364 u32 ocp_data;
2365
2366 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2367 if (enable)
2368 ocp_data |= PWR_EN | PHASE2_EN;
2369 else
2370 ocp_data &= ~(PWR_EN | PHASE2_EN);
2371 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2372
2373 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2374 ocp_data &= ~PCUT_STATUS;
2375 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2376}
2377
2378static bool rtl_can_wakeup(struct r8152 *tp)
2379{
2380 struct usb_device *udev = tp->udev;
2381
2382 return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
2383}
2384
2339static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) 2385static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2340{ 2386{
2341 if (enable) { 2387 if (enable) {
2342 u32 ocp_data; 2388 u32 ocp_data;
2343 2389
2390 r8153_u1u2en(tp, false);
2391 r8153_u2p3en(tp, false);
2392
2344 __rtl_set_wol(tp, WAKE_ANY); 2393 __rtl_set_wol(tp, WAKE_ANY);
2345 2394
2346 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2395 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2352,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2352 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2401 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2353 } else { 2402 } else {
2354 __rtl_set_wol(tp, tp->saved_wolopts); 2403 __rtl_set_wol(tp, tp->saved_wolopts);
2404 r8153_u2p3en(tp, true);
2405 r8153_u1u2en(tp, true);
2355 } 2406 }
2356} 2407}
2357 2408
@@ -2598,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2598 set_bit(PHY_RESET, &tp->flags); 2649 set_bit(PHY_RESET, &tp->flags);
2599} 2650}
2600 2651
2601static void r8153_u1u2en(struct r8152 *tp, bool enable)
2602{
2603 u8 u1u2[8];
2604
2605 if (enable)
2606 memset(u1u2, 0xff, sizeof(u1u2));
2607 else
2608 memset(u1u2, 0x00, sizeof(u1u2));
2609
2610 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2611}
2612
2613static void r8153_u2p3en(struct r8152 *tp, bool enable)
2614{
2615 u32 ocp_data;
2616
2617 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2618 if (enable)
2619 ocp_data |= U2P3_ENABLE;
2620 else
2621 ocp_data &= ~U2P3_ENABLE;
2622 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2623}
2624
2625static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2626{
2627 u32 ocp_data;
2628
2629 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2630 if (enable)
2631 ocp_data |= PWR_EN | PHASE2_EN;
2632 else
2633 ocp_data &= ~(PWR_EN | PHASE2_EN);
2634 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2635
2636 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2637 ocp_data &= ~PCUT_STATUS;
2638 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2639}
2640
2641static void r8153_first_init(struct r8152 *tp) 2652static void r8153_first_init(struct r8152 *tp)
2642{ 2653{
2643 u32 ocp_data; 2654 u32 ocp_data;
@@ -2780,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
2780 r8153_disable_aldps(tp); 2791 r8153_disable_aldps(tp);
2781 rtl_disable(tp); 2792 rtl_disable(tp);
2782 r8153_enable_aldps(tp); 2793 r8153_enable_aldps(tp);
2794 usb_enable_lpm(tp->udev);
2783} 2795}
2784 2796
2785static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) 2797static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2900,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
2900 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2912 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2901 return; 2913 return;
2902 2914
2915 r8153_u1u2en(tp, false);
2903 r8153_disable_aldps(tp); 2916 r8153_disable_aldps(tp);
2904 r8153_first_init(tp); 2917 r8153_first_init(tp);
2905 r8153_enable_aldps(tp); 2918 r8153_enable_aldps(tp);
2919 r8153_u2p3en(tp, true);
2920 r8153_u1u2en(tp, true);
2921 usb_enable_lpm(tp->udev);
2906} 2922}
2907 2923
2908static void rtl8153_down(struct r8152 *tp) 2924static void rtl8153_down(struct r8152 *tp)
@@ -2913,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
2913 } 2929 }
2914 2930
2915 r8153_u1u2en(tp, false); 2931 r8153_u1u2en(tp, false);
2932 r8153_u2p3en(tp, false);
2916 r8153_power_cut_en(tp, false); 2933 r8153_power_cut_en(tp, false);
2917 r8153_disable_aldps(tp); 2934 r8153_disable_aldps(tp);
2918 r8153_enter_oob(tp); 2935 r8153_enter_oob(tp);
@@ -2931,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
2931 if (!netif_carrier_ok(netdev)) { 2948 if (!netif_carrier_ok(netdev)) {
2932 tp->rtl_ops.enable(tp); 2949 tp->rtl_ops.enable(tp);
2933 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 2950 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
2951 napi_disable(&tp->napi);
2934 netif_carrier_on(netdev); 2952 netif_carrier_on(netdev);
2935 rtl_start_rx(tp); 2953 rtl_start_rx(tp);
2954 napi_enable(&tp->napi);
2936 } 2955 }
2937 } else { 2956 } else {
2938 if (netif_carrier_ok(netdev)) { 2957 if (netif_carrier_ok(netdev)) {
@@ -3251,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
3251 msleep(20); 3270 msleep(20);
3252 } 3271 }
3253 3272
3273 usb_disable_lpm(tp->udev);
3254 r8153_u2p3en(tp, false); 3274 r8153_u2p3en(tp, false);
3255 3275
3256 if (tp->version == RTL_VER_04) { 3276 if (tp->version == RTL_VER_04) {
@@ -3318,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
3318 r8153_enable_aldps(tp); 3338 r8153_enable_aldps(tp);
3319 r8152b_enable_fc(tp); 3339 r8152b_enable_fc(tp);
3320 rtl_tally_reset(tp); 3340 rtl_tally_reset(tp);
3341 r8153_u2p3en(tp, true);
3342}
3343
3344static int rtl8152_pre_reset(struct usb_interface *intf)
3345{
3346 struct r8152 *tp = usb_get_intfdata(intf);
3347 struct net_device *netdev;
3348
3349 if (!tp)
3350 return 0;
3351
3352 netdev = tp->netdev;
3353 if (!netif_running(netdev))
3354 return 0;
3355
3356 napi_disable(&tp->napi);
3357 clear_bit(WORK_ENABLE, &tp->flags);
3358 usb_kill_urb(tp->intr_urb);
3359 cancel_delayed_work_sync(&tp->schedule);
3360 if (netif_carrier_ok(netdev)) {
3361 netif_stop_queue(netdev);
3362 mutex_lock(&tp->control);
3363 tp->rtl_ops.disable(tp);
3364 mutex_unlock(&tp->control);
3365 }
3366
3367 return 0;
3368}
3369
3370static int rtl8152_post_reset(struct usb_interface *intf)
3371{
3372 struct r8152 *tp = usb_get_intfdata(intf);
3373 struct net_device *netdev;
3374
3375 if (!tp)
3376 return 0;
3377
3378 netdev = tp->netdev;
3379 if (!netif_running(netdev))
3380 return 0;
3381
3382 set_bit(WORK_ENABLE, &tp->flags);
3383 if (netif_carrier_ok(netdev)) {
3384 mutex_lock(&tp->control);
3385 tp->rtl_ops.enable(tp);
3386 rtl8152_set_rx_mode(netdev);
3387 mutex_unlock(&tp->control);
3388 netif_wake_queue(netdev);
3389 }
3390
3391 napi_enable(&tp->napi);
3392
3393 return 0;
3321} 3394}
3322 3395
3323static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3396static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3373,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
3373 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3446 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3374 rtl_runtime_suspend_enable(tp, false); 3447 rtl_runtime_suspend_enable(tp, false);
3375 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3448 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3449 napi_disable(&tp->napi);
3376 set_bit(WORK_ENABLE, &tp->flags); 3450 set_bit(WORK_ENABLE, &tp->flags);
3377 if (netif_carrier_ok(tp->netdev)) 3451 if (netif_carrier_ok(tp->netdev))
3378 rtl_start_rx(tp); 3452 rtl_start_rx(tp);
3453 napi_enable(&tp->napi);
3379 } else { 3454 } else {
3380 tp->rtl_ops.up(tp); 3455 tp->rtl_ops.up(tp);
3381 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3456 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3402,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3402 if (usb_autopm_get_interface(tp->intf) < 0) 3477 if (usb_autopm_get_interface(tp->intf) < 0)
3403 return; 3478 return;
3404 3479
3405 mutex_lock(&tp->control); 3480 if (!rtl_can_wakeup(tp)) {
3406 3481 wol->supported = 0;
3407 wol->supported = WAKE_ANY; 3482 wol->wolopts = 0;
3408 wol->wolopts = __rtl_get_wol(tp); 3483 } else {
3409 3484 mutex_lock(&tp->control);
3410 mutex_unlock(&tp->control); 3485 wol->supported = WAKE_ANY;
3486 wol->wolopts = __rtl_get_wol(tp);
3487 mutex_unlock(&tp->control);
3488 }
3411 3489
3412 usb_autopm_put_interface(tp->intf); 3490 usb_autopm_put_interface(tp->intf);
3413} 3491}
@@ -3417,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3417 struct r8152 *tp = netdev_priv(dev); 3495 struct r8152 *tp = netdev_priv(dev);
3418 int ret; 3496 int ret;
3419 3497
3498 if (!rtl_can_wakeup(tp))
3499 return -EOPNOTSUPP;
3500
3420 ret = usb_autopm_get_interface(tp->intf); 3501 ret = usb_autopm_get_interface(tp->intf);
3421 if (ret < 0) 3502 if (ret < 0)
3422 goto out_set_wol; 3503 goto out_set_wol;
@@ -4058,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
4058 goto out1; 4139 goto out1;
4059 } 4140 }
4060 4141
4142 if (!rtl_can_wakeup(tp))
4143 __rtl_set_wol(tp, 0);
4144
4061 tp->saved_wolopts = __rtl_get_wol(tp); 4145 tp->saved_wolopts = __rtl_get_wol(tp);
4062 if (tp->saved_wolopts) 4146 if (tp->saved_wolopts)
4063 device_set_wakeup_enable(&udev->dev, true); 4147 device_set_wakeup_enable(&udev->dev, true);
@@ -4117,6 +4201,7 @@ static struct usb_device_id rtl8152_table[] = {
4117 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 4201 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
4118 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, 4202 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
4119 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 4203 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
4204 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
4120 {} 4205 {}
4121}; 4206};
4122 4207
@@ -4130,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
4130 .suspend = rtl8152_suspend, 4215 .suspend = rtl8152_suspend,
4131 .resume = rtl8152_resume, 4216 .resume = rtl8152_resume,
4132 .reset_resume = rtl8152_resume, 4217 .reset_resume = rtl8152_resume,
4218 .pre_reset = rtl8152_pre_reset,
4219 .post_reset = rtl8152_post_reset,
4133 .supports_autosuspend = 1, 4220 .supports_autosuspend = 1,
4134 .disable_hub_initiated_lpm = 1, 4221 .disable_hub_initiated_lpm = 1,
4135}; 4222};
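
The r8152 hunks above gate Wake-on-LAN reporting on rtl_can_wakeup(): a device that cannot wake the host advertises no WoL options and rejects attempts to set them. A minimal userspace model of that gating follows; all names are invented for illustration and this is not the driver code.

/* Illustrative model of WoL gating on a "can wake up" capability,
 * mirroring the rtl_can_wakeup() checks above. All names are invented. */
#include <errno.h>
#include <stdio.h>

struct wol_info { unsigned int supported, wolopts; };
struct fake_dev { int can_wakeup; unsigned int wolopts; };

static void model_get_wol(const struct fake_dev *d, struct wol_info *wol)
{
	if (!d->can_wakeup) {		/* no wake capability: report nothing */
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}
	wol->supported = ~0u;		/* stands in for WAKE_ANY */
	wol->wolopts = d->wolopts;
}

static int model_set_wol(struct fake_dev *d, unsigned int wolopts)
{
	if (!d->can_wakeup)		/* mirror the early -EOPNOTSUPP return */
		return -EOPNOTSUPP;
	d->wolopts = wolopts;
	return 0;
}

int main(void)
{
	struct fake_dev d = { .can_wakeup = 0 };
	struct wol_info w;

	model_get_wol(&d, &w);
	printf("supported=%#x wolopts=%#x set=%d\n",
	       w.supported, w.wolopts, model_set_wol(&d, 0x1));
	return 0;
}
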
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1756 /* Do we support "hardware" checksums? */ 1756 /* Do we support "hardware" checksums? */
1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1758 /* This opens up the world of extra features. */ 1758 /* This opens up the world of extra features. */
1759 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1759 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1760 if (csum) 1760 if (csum)
1761 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1761 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1762 1762
1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1828 else 1828 else
1829 vi->hdr_len = sizeof(struct virtio_net_hdr); 1829 vi->hdr_len = sizeof(struct virtio_net_hdr);
1830 1830
1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
1832 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1832 vi->any_header_sg = true; 1833 vi->any_header_sg = true;
1833 1834
1834 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1835 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index da11bb5e9c7f..46f4caddccbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1216,7 +1216,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1216 static const u32 rxprod_reg[2] = { 1216 static const u32 rxprod_reg[2] = {
1217 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2 1217 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1218 }; 1218 };
1219 u32 num_rxd = 0; 1219 u32 num_pkts = 0;
1220 bool skip_page_frags = false; 1220 bool skip_page_frags = false;
1221 struct Vmxnet3_RxCompDesc *rcd; 1221 struct Vmxnet3_RxCompDesc *rcd;
1222 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1222 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1235,13 +1235,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1235 struct Vmxnet3_RxDesc *rxd; 1235 struct Vmxnet3_RxDesc *rxd;
1236 u32 idx, ring_idx; 1236 u32 idx, ring_idx;
1237 struct vmxnet3_cmd_ring *ring = NULL; 1237 struct vmxnet3_cmd_ring *ring = NULL;
1238 if (num_rxd >= quota) { 1238 if (num_pkts >= quota) {
1239 /* we may stop even before we see the EOP desc of 1239 /* we may stop even before we see the EOP desc of
1240 * the current pkt 1240 * the current pkt
1241 */ 1241 */
1242 break; 1242 break;
1243 } 1243 }
1244 num_rxd++;
1245 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); 1244 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1246 idx = rcd->rxdIdx; 1245 idx = rcd->rxdIdx;
1247 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; 1246 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
@@ -1413,6 +1412,7 @@ not_lro:
1413 napi_gro_receive(&rq->napi, skb); 1412 napi_gro_receive(&rq->napi, skb);
1414 1413
1415 ctx->skb = NULL; 1414 ctx->skb = NULL;
1415 num_pkts++;
1416 } 1416 }
1417 1417
1418rcd_done: 1418rcd_done:
@@ -1443,7 +1443,7 @@ rcd_done:
1443 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); 1443 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1444 } 1444 }
1445 1445
1446 return num_rxd; 1446 return num_pkts;
1447} 1447}
1448 1448
1449 1449
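
The vmxnet3 change above renames num_rxd to num_pkts and moves the increment so the NAPI quota is charged once per completed packet (after the EOP descriptor) rather than once per completion descriptor. A small self-contained sketch of that accounting, with an invented descriptor layout and names:

/* Sketch of per-packet quota accounting: the budget is charged after the
 * EOP descriptor of each packet, not per descriptor seen. */
#include <stdbool.h>
#include <stdio.h>

struct comp_desc { bool eop; };

static int rx_complete(const struct comp_desc *ring, int n, int quota)
{
	int num_pkts = 0;

	for (int i = 0; i < n; i++) {
		if (num_pkts >= quota)	/* may stop before the EOP of a packet */
			break;
		/* ... gather this fragment into the current skb ... */
		if (ring[i].eop)
			num_pkts++;	/* one unit of quota per completed packet */
	}
	return num_pkts;
}

int main(void)
{
	struct comp_desc ring[] = { {false}, {true}, {false}, {false}, {true} };

	printf("completed packets: %d\n", rx_complete(ring, 5, 8));
	return 0;
}
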
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
589 chan->netdev->base_addr = chan->cosa->datareg; 589 chan->netdev->base_addr = chan->cosa->datareg;
590 chan->netdev->irq = chan->cosa->irq; 590 chan->netdev->irq = chan->cosa->irq;
591 chan->netdev->dma = chan->cosa->dma; 591 chan->netdev->dma = chan->cosa->dma;
592 if (register_hdlc_device(chan->netdev)) { 592 err = register_hdlc_device(chan->netdev);
593 if (err) {
593 netdev_warn(chan->netdev, 594 netdev_warn(chan->netdev,
594 "register_hdlc_device() failed\n"); 595 "register_hdlc_device() failed\n");
595 free_netdev(chan->netdev); 596 free_netdev(chan->netdev);
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index feacc3b994b7..2f0bd6955f33 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1044,7 +1044,7 @@ EXPORT_SYMBOL(z8530_sync_dma_close);
1044 * @dev: The network device to attach 1044 * @dev: The network device to attach
1045 * @c: The Z8530 channel to configure in sync DMA mode. 1045 * @c: The Z8530 channel to configure in sync DMA mode.
1046 * 1046 *
1047 * Set up a Z85x30 device for synchronous DMA tranmission. One 1047 * Set up a Z85x30 device for synchronous DMA transmission. One
1048 * ISA DMA channel must be available for this to work. The receive 1048 * ISA DMA channel must be available for this to work. The receive
1049 * side is run in PIO mode, but then it has the bigger FIFO. 1049 * side is run in PIO mode, but then it has the bigger FIFO.
1050 */ 1050 */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
279 return; 279 return;
280 case AR9300_DEVID_QCA956X: 280 case AR9300_DEVID_QCA956X:
281 ah->hw_version.macVersion = AR_SREV_VERSION_9561; 281 ah->hw_version.macVersion = AR_SREV_VERSION_9561;
282 return;
282 } 283 }
283 284
284 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 285 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
3728 switch (phy->rev) { 3728 switch (phy->rev) {
3729 case 6: 3729 case 6:
3730 case 5: 3730 case 5:
3731 if (sprom->fem.ghz5.extpa_gain == 3) 3731 if (sprom->fem.ghz2.extpa_gain == 3)
3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; 3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
3733 /* fall through */ 3733 /* fall through */
3734 case 4: 3734 case 4:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
438#define RX_QUEUE_MASK 255 438#define RX_QUEUE_MASK 255
439#define RX_QUEUE_SIZE_LOG 8 439#define RX_QUEUE_SIZE_LOG 8
440 440
441/*
442 * RX related structures and functions
443 */
444#define RX_FREE_BUFFERS 64
445#define RX_LOW_WATERMARK 8
446
441/** 447/**
442 * struct iwl_rb_status - reserve buffer status 448 * struct iwl_rb_status - reserve buffer status
443 * host memory mapped FH registers 449 * host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
540 hw_addr = (const u8 *)(mac_override + 540 hw_addr = (const u8 *)(mac_override +
541 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
542 542
543 /* The byte order is little endian 16 bit, meaning 214365 */ 543 /*
544 data->hw_addr[0] = hw_addr[1]; 544 * Store the MAC address from MAO section.
545 data->hw_addr[1] = hw_addr[0]; 545 * No byte swapping is required in MAO section
546 data->hw_addr[2] = hw_addr[3]; 546 */
547 data->hw_addr[3] = hw_addr[2]; 547 memcpy(data->hw_addr, hw_addr, ETH_ALEN);
548 data->hw_addr[4] = hw_addr[5];
549 data->hw_addr[5] = hw_addr[4];
550 548
551 /* 549 /*
 552 * Force the use of the OTP MAC address in case of reserved MAC 550 * Force the use of the OTP MAC address in case of reserved MAC
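
The iwl-nvm-parse.c hunk above replaces a pairwise byte swap of the MAC-address-override field with a plain 6-byte copy. For contrast, a standalone sketch of the two behaviours (illustrative only, not the driver code):

/* The removed code swapped byte pairs (order 214365); the replacement
 * copies the six bytes exactly as stored in the MAO section. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void copy_swapped(unsigned char *dst, const unsigned char *src)
{
	for (int i = 0; i < ETH_ALEN; i += 2) {	/* pairwise byte swap */
		dst[i] = src[i + 1];
		dst[i + 1] = src[i];
	}
}

int main(void)
{
	unsigned char mao[ETH_ALEN] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc };
	unsigned char swapped[ETH_ALEN], plain[ETH_ALEN];

	copy_swapped(swapped, mao);
	memcpy(plain, mao, ETH_ALEN);	/* new behaviour: no byte swapping */
	printf("swapped starts %02x:%02x, plain starts %02x:%02x\n",
	       swapped[0], swapped[1], plain[0], plain[1]);
	return 0;
}
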
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
660 * iwl_umac_scan_flags 660 * iwl_umac_scan_flags
661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request 661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
662 * can be preempted by other scan requests with higher priority. 662 * can be preempted by other scan requests with higher priority.
 663 * The low priority scan is aborted. 663 * The low priority scan will be resumed when the higher priority scan is
664 * completed.
664 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver 665 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
665 * when scan starts. 666 * when scan starts.
666 */ 667 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1023 cmd->scan_priority = 1023 cmd->scan_priority =
1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1025 1025
1026 if (iwl_mvm_scan_total_iterations(params) == 0) 1026 if (iwl_mvm_scan_total_iterations(params) == 1)
1027 cmd->ooc_priority = 1027 cmd->ooc_priority =
1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1029 else 1029 else
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1109 cmd->uid = cpu_to_le32(uid); 1109 cmd->uid = cpu_to_le32(uid);
1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); 1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1111 1111
1112 if (type == IWL_MVM_SCAN_SCHED)
1113 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1114
1112 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) 1115 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
1113 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1116 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1114 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1117 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1402 u8 sta_id; 1402 u8 sta_id;
1403 int ret; 1403 int ret;
1404 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
1404 1405
1405 lockdep_assert_held(&mvm->mutex); 1406 lockdep_assert_held(&mvm->mutex);
1406 1407
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1467end: 1468end:
1468 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 1469 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1469 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 1470 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1470 sta->addr, ret); 1471 sta ? sta->addr : zero_addr, ret);
1471 return ret; 1472 return ret;
1472} 1473}
1473 1474
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
86{ 86{
87 lockdep_assert_held(&mvm->time_event_lock); 87 lockdep_assert_held(&mvm->time_event_lock);
88 88
89 if (te_data->id == TE_MAX) 89 if (!te_data->vif)
90 return; 90 return;
91 91
92 list_del(&te_data->list); 92 list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
252 252
253 if (info->band == IEEE80211_BAND_2GHZ && 253 if (info->band == IEEE80211_BAND_2GHZ &&
254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
255 rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; 255 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
256 else 256 else
257 rate_flags = 257 rate_flags =
258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; 258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
368/* 3165 Series */ 368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, 375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, 376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, 377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
377 379
378/* 7265 Series */ 380/* 7265 Series */
379 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 428 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 429 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 430 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 432 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 433 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 434 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 435 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-op-mode.h" 45#include "iwl-op-mode.h"
46 46
47/*
48 * RX related structures and functions
49 */
50#define RX_NUM_QUEUES 1
51#define RX_POST_REQ_ALLOC 2
52#define RX_CLAIM_REQ_ALLOC 8
53#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
54#define RX_LOW_WATERMARK 8
55
56struct iwl_host_cmd; 47struct iwl_host_cmd;
57 48
58/*This file includes the declaration that are internal to the 49/*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
86 * struct iwl_rxq - Rx queue 77 * struct iwl_rxq - Rx queue
87 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 78 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
88 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 79 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
80 * @pool:
81 * @queue:
89 * @read: Shared index to newest available Rx buffer 82 * @read: Shared index to newest available Rx buffer
90 * @write: Shared index to oldest written Rx packet 83 * @write: Shared index to oldest written Rx packet
91 * @free_count: Number of pre-allocated buffers in rx_free 84 * @free_count: Number of pre-allocated buffers in rx_free
92 * @used_count: Number of RBDs handled to allocator to use for allocation
93 * @write_actual: 85 * @write_actual:
94 * @rx_free: list of RBDs with allocated RB ready for use 86 * @rx_free: list of free SKBs for use
95 * @rx_used: list of RBDs with no RB attached 87 * @rx_used: List of Rx buffers with no SKB
96 * @need_update: flag to indicate we need to update read/write index 88 * @need_update: flag to indicate we need to update read/write index
97 * @rb_stts: driver's pointer to receive buffer status 89 * @rb_stts: driver's pointer to receive buffer status
98 * @rb_stts_dma: bus address of receive buffer status 90 * @rb_stts_dma: bus address of receive buffer status
99 * @lock: 91 * @lock:
100 * @pool: initial pool of iwl_rx_mem_buffer for the queue
101 * @queue: actual rx queue
102 * 92 *
103 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 93 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
104 */ 94 */
105struct iwl_rxq { 95struct iwl_rxq {
106 __le32 *bd; 96 __le32 *bd;
107 dma_addr_t bd_dma; 97 dma_addr_t bd_dma;
98 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
99 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
108 u32 read; 100 u32 read;
109 u32 write; 101 u32 write;
110 u32 free_count; 102 u32 free_count;
111 u32 used_count;
112 u32 write_actual; 103 u32 write_actual;
113 struct list_head rx_free; 104 struct list_head rx_free;
114 struct list_head rx_used; 105 struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
116 struct iwl_rb_status *rb_stts; 107 struct iwl_rb_status *rb_stts;
117 dma_addr_t rb_stts_dma; 108 dma_addr_t rb_stts_dma;
118 spinlock_t lock; 109 spinlock_t lock;
119 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
120 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
121};
122
123/**
124 * struct iwl_rb_allocator - Rx allocator
125 * @pool: initial pool of allocator
 126 * @req_pending: number of requests the allocator had not processed yet
127 * @req_ready: number of requests honored and ready for claiming
128 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
129 * the queue. This is a list of &struct iwl_rx_mem_buffer
130 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
131 * of &struct iwl_rx_mem_buffer
132 * @lock: protects the rbd_allocated and rbd_empty lists
133 * @alloc_wq: work queue for background calls
134 * @rx_alloc: work struct for background calls
135 */
136struct iwl_rb_allocator {
137 struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
138 atomic_t req_pending;
139 atomic_t req_ready;
140 struct list_head rbd_allocated;
141 struct list_head rbd_empty;
142 spinlock_t lock;
143 struct workqueue_struct *alloc_wq;
144 struct work_struct rx_alloc;
145}; 110};
146 111
147struct iwl_dma_ptr { 112struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
285/** 250/**
286 * struct iwl_trans_pcie - PCIe transport specific data 251 * struct iwl_trans_pcie - PCIe transport specific data
287 * @rxq: all the RX queue data 252 * @rxq: all the RX queue data
288 * @rba: allocator for RX replenishing 253 * @rx_replenish: work that will be called when buffers need to be allocated
289 * @drv - pointer to iwl_drv 254 * @drv - pointer to iwl_drv
290 * @trans: pointer to the generic transport area 255 * @trans: pointer to the generic transport area
291 * @scd_base_addr: scheduler sram base address in SRAM 256 * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
308 */ 273 */
309struct iwl_trans_pcie { 274struct iwl_trans_pcie {
310 struct iwl_rxq rxq; 275 struct iwl_rxq rxq;
311 struct iwl_rb_allocator rba; 276 struct work_struct rx_replenish;
312 struct iwl_trans *trans; 277 struct iwl_trans *trans;
313 struct iwl_drv *drv; 278 struct iwl_drv *drv;
314 279
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
5 * 5 *
6 * Portions of this file are derived from the ipw3945 project, as well 6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files. 7 * as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
74 * resets the Rx queue buffers with new memory. 74 * resets the Rx queue buffers with new memory.
75 * 75 *
76 * The management in the driver is as follows: 76 * The management in the driver is as follows:
77 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 77 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
78 * When the interrupt handler is called, the request is processed. 78 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
79 * The page is either stolen - transferred to the upper layer 79 * to replenish the iwl->rxq->rx_free.
80 * or reused - added immediately to the iwl->rxq->rx_free list. 80 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
81 * + When the page is stolen - the driver updates the matching queue's used 81 * iwl->rxq is replenished and the READ INDEX is updated (updating the
82 * count, detaches the RBD and transfers it to the queue used list. 82 * 'processed' and 'read' driver indexes as well)
83 * When there are two used RBDs - they are transferred to the allocator empty
84 * list. Work is then scheduled for the allocator to start allocating
85 * eight buffers.
86 * When there are another 6 used RBDs - they are transferred to the allocator
87 * empty list and the driver tries to claim the pre-allocated buffers and
88 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
89 * until ready.
90 * When there are 8+ buffers in the free list - either from allocation or from
91 * 8 reused unstolen pages - restock is called to update the FW and indexes.
92 * + In order to make sure the allocator always has RBDs to use for allocation
93 * the allocator has initial pool in the size of num_queues*(8-2) - the
94 * maximum missing RBDs per allocation request (request posted with 2
95 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
96 * The queues supplies the recycle of the rest of the RBDs.
97 * + A received packet is processed and handed to the kernel network stack, 83 * + A received packet is processed and handed to the kernel network stack,
98 * detached from the iwl->rxq. The driver 'processed' index is updated. 84 * detached from the iwl->rxq. The driver 'processed' index is updated.
99 * + If there are no allocated buffers in iwl->rxq->rx_free, 85 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
86 * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
100 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 87 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
101 * If there were enough free buffers and RX_STALLED is set it is cleared. 88 * If there were enough free buffers and RX_STALLED is set it is cleared.
102 * 89 *
@@ -105,32 +92,18 @@
105 * 92 *
106 * iwl_rxq_alloc() Allocates rx_free 93 * iwl_rxq_alloc() Allocates rx_free
107 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 94 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
108 * iwl_pcie_rxq_restock. 95 * iwl_pcie_rxq_restock
109 * Used only during initialization.
110 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 96 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
111 * queue, updates firmware pointers, and updates 97 * queue, updates firmware pointers, and updates
112 * the WRITE index. 98 * the WRITE index. If insufficient rx_free buffers
113 * iwl_pcie_rx_allocator() Background work for allocating pages. 99 * are available, schedules iwl_pcie_rx_replenish
114 * 100 *
115 * -- enable interrupts -- 101 * -- enable interrupts --
116 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 102 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
117 * READ INDEX, detaching the SKB from the pool. 103 * READ INDEX, detaching the SKB from the pool.
118 * Moves the packet buffer from queue to rx_used. 104 * Moves the packet buffer from queue to rx_used.
119 * Posts and claims requests to the allocator.
120 * Calls iwl_pcie_rxq_restock to refill any empty 105 * Calls iwl_pcie_rxq_restock to refill any empty
121 * slots. 106 * slots.
122 *
123 * RBD life-cycle:
124 *
125 * Init:
126 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
127 *
128 * Regular Receive interrupt:
129 * Page Stolen:
130 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
131 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
132 * Page not Stolen:
133 * rxq.queue -> rxq.rx_free -> rxq.queue
134 * ... 107 * ...
135 * 108 *
136 */ 109 */
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
267 rxq->free_count--; 240 rxq->free_count--;
268 } 241 }
269 spin_unlock(&rxq->lock); 242 spin_unlock(&rxq->lock);
243 /* If the pre-allocated buffer pool is dropping low, schedule to
244 * refill it */
245 if (rxq->free_count <= RX_LOW_WATERMARK)
246 schedule_work(&trans_pcie->rx_replenish);
270 247
271 /* If we've added more space for the firmware to place data, tell it. 248 /* If we've added more space for the firmware to place data, tell it.
272 * Increment device's write pointer in multiples of 8. */ 249 * Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
278} 255}
279 256
280/* 257/*
281 * iwl_pcie_rx_alloc_page - allocates and returns a page.
282 *
283 */
284static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
285{
286 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
287 struct iwl_rxq *rxq = &trans_pcie->rxq;
288 struct page *page;
289 gfp_t gfp_mask = GFP_KERNEL;
290
291 if (rxq->free_count > RX_LOW_WATERMARK)
292 gfp_mask |= __GFP_NOWARN;
293
294 if (trans_pcie->rx_page_order > 0)
295 gfp_mask |= __GFP_COMP;
296
297 /* Alloc a new receive buffer */
298 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
299 if (!page) {
300 if (net_ratelimit())
301 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
302 trans_pcie->rx_page_order);
303 /* Issue an error if the hardware has consumed more than half
304 * of its free buffer list and we don't have enough
305 * pre-allocated buffers.
 306 */
307 if (rxq->free_count <= RX_LOW_WATERMARK &&
308 iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
309 net_ratelimit())
310 IWL_CRIT(trans,
311 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
312 rxq->free_count);
313 return NULL;
314 }
315 return page;
316}
317
 318/* 257/*
319 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 258 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
320 * 259 *
321 * A used RBD is an Rx buffer that has been given to the stack. To use it again 260 * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
324 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 263 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
325 * allocated buffers. 264 * allocated buffers.
326 */ 265 */
327static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) 266static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
328{ 267{
329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 268 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
330 struct iwl_rxq *rxq = &trans_pcie->rxq; 269 struct iwl_rxq *rxq = &trans_pcie->rxq;
331 struct iwl_rx_mem_buffer *rxb; 270 struct iwl_rx_mem_buffer *rxb;
332 struct page *page; 271 struct page *page;
272 gfp_t gfp_mask = priority;
333 273
334 while (1) { 274 while (1) {
335 spin_lock(&rxq->lock); 275 spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
339 } 279 }
340 spin_unlock(&rxq->lock); 280 spin_unlock(&rxq->lock);
341 281
282 if (rxq->free_count > RX_LOW_WATERMARK)
283 gfp_mask |= __GFP_NOWARN;
284
285 if (trans_pcie->rx_page_order > 0)
286 gfp_mask |= __GFP_COMP;
287
342 /* Alloc a new receive buffer */ 288 /* Alloc a new receive buffer */
343 page = iwl_pcie_rx_alloc_page(trans); 289 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
344 if (!page) 290 if (!page) {
291 if (net_ratelimit())
292 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
293 "order: %d\n",
294 trans_pcie->rx_page_order);
295
296 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
297 net_ratelimit())
298 IWL_CRIT(trans, "Failed to alloc_pages with %s."
299 "Only %u free buffers remaining.\n",
300 priority == GFP_ATOMIC ?
301 "GFP_ATOMIC" : "GFP_KERNEL",
302 rxq->free_count);
303 /* We don't reschedule replenish work here -- we will
304 * call the restock method and if it still needs
305 * more buffers it will schedule replenish */
345 return; 306 return;
307 }
346 308
347 spin_lock(&rxq->lock); 309 spin_lock(&rxq->lock);
348 310
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
393 355
394 lockdep_assert_held(&rxq->lock); 356 lockdep_assert_held(&rxq->lock);
395 357
396 for (i = 0; i < RX_QUEUE_SIZE; i++) { 358 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
397 if (!rxq->pool[i].page) 359 if (!rxq->pool[i].page)
398 continue; 360 continue;
399 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 361 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 410 * When moving to rx_free a page is allocated for the slot. 372 * When moving to rx_free a page is allocated for the slot.
411 * 373 *
412 * Also restock the Rx queue via iwl_pcie_rxq_restock. 374 * Also restock the Rx queue via iwl_pcie_rxq_restock.
413 * This is called only during initialization 375 * This is called as a scheduled work item (except for during initialization)
414 */ 376 */
415static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 377static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
416{ 378{
417 iwl_pcie_rxq_alloc_rbs(trans); 379 iwl_pcie_rxq_alloc_rbs(trans, gfp);
418 380
419 iwl_pcie_rxq_restock(trans); 381 iwl_pcie_rxq_restock(trans);
420} 382}
421 383
422/* 384static void iwl_pcie_rx_replenish_work(struct work_struct *data)
423 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
424 *
425 * Allocates for each received request 8 pages
426 * Called as a scheduled work item.
427 */
428static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
429{
430 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
431 struct iwl_rb_allocator *rba = &trans_pcie->rba;
432
433 while (atomic_read(&rba->req_pending)) {
434 int i;
435 struct list_head local_empty;
436 struct list_head local_allocated;
437
438 INIT_LIST_HEAD(&local_allocated);
439 spin_lock(&rba->lock);
440 /* swap out the entire rba->rbd_empty to a local list */
441 list_replace_init(&rba->rbd_empty, &local_empty);
442 spin_unlock(&rba->lock);
443
444 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
445 struct iwl_rx_mem_buffer *rxb;
446 struct page *page;
447
448 /* List should never be empty - each reused RBD is
449 * returned to the list, and initial pool covers any
450 * possible gap between the time the page is allocated
451 * to the time the RBD is added.
452 */
453 BUG_ON(list_empty(&local_empty));
454 /* Get the first rxb from the rbd list */
455 rxb = list_first_entry(&local_empty,
456 struct iwl_rx_mem_buffer, list);
457 BUG_ON(rxb->page);
458
459 /* Alloc a new receive buffer */
460 page = iwl_pcie_rx_alloc_page(trans);
461 if (!page)
462 continue;
463 rxb->page = page;
464
465 /* Get physical address of the RB */
466 rxb->page_dma = dma_map_page(trans->dev, page, 0,
467 PAGE_SIZE << trans_pcie->rx_page_order,
468 DMA_FROM_DEVICE);
469 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
470 rxb->page = NULL;
471 __free_pages(page, trans_pcie->rx_page_order);
472 continue;
473 }
474 /* dma address must be no more than 36 bits */
475 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
476 /* and also 256 byte aligned! */
477 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
478
479 /* move the allocated entry to the out list */
480 list_move(&rxb->list, &local_allocated);
481 i++;
482 }
483
484 spin_lock(&rba->lock);
485 /* add the allocated rbds to the allocator allocated list */
486 list_splice_tail(&local_allocated, &rba->rbd_allocated);
487 /* add the unused rbds back to the allocator empty list */
488 list_splice_tail(&local_empty, &rba->rbd_empty);
489 spin_unlock(&rba->lock);
490
491 atomic_dec(&rba->req_pending);
492 atomic_inc(&rba->req_ready);
493 }
494}
495
496/*
497 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
 498 *
 499 * Called by queue when the queue posted allocation request and
500 * has freed 8 RBDs in order to restock itself.
501 */
502static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
503 struct iwl_rx_mem_buffer
504 *out[RX_CLAIM_REQ_ALLOC])
505{
506 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
507 struct iwl_rb_allocator *rba = &trans_pcie->rba;
508 int i;
509
510 if (atomic_dec_return(&rba->req_ready) < 0) {
511 atomic_inc(&rba->req_ready);
512 IWL_DEBUG_RX(trans,
513 "Allocation request not ready, pending requests = %d\n",
514 atomic_read(&rba->req_pending));
515 return -ENOMEM;
516 }
517
518 spin_lock(&rba->lock);
519 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
520 /* Get next free Rx buffer, remove it from free list */
521 out[i] = list_first_entry(&rba->rbd_allocated,
522 struct iwl_rx_mem_buffer, list);
523 list_del(&out[i]->list);
524 }
525 spin_unlock(&rba->lock);
526
527 return 0;
528}
529
530static void iwl_pcie_rx_allocator_work(struct work_struct *data)
531{ 385{
532 struct iwl_rb_allocator *rba_p =
533 container_of(data, struct iwl_rb_allocator, rx_alloc);
534 struct iwl_trans_pcie *trans_pcie = 386 struct iwl_trans_pcie *trans_pcie =
535 container_of(rba_p, struct iwl_trans_pcie, rba); 387 container_of(data, struct iwl_trans_pcie, rx_replenish);
536 388
537 iwl_pcie_rx_allocator(trans_pcie->trans); 389 iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
538} 390}
539 391
540static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 392static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
541{ 393{
542 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 394 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
543 struct iwl_rxq *rxq = &trans_pcie->rxq; 395 struct iwl_rxq *rxq = &trans_pcie->rxq;
544 struct iwl_rb_allocator *rba = &trans_pcie->rba;
545 struct device *dev = trans->dev; 396 struct device *dev = trans->dev;
546 397
547 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); 398 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
548 399
549 spin_lock_init(&rxq->lock); 400 spin_lock_init(&rxq->lock);
550 spin_lock_init(&rba->lock);
551 401
552 if (WARN_ON(rxq->bd || rxq->rb_stts)) 402 if (WARN_ON(rxq->bd || rxq->rb_stts))
553 return -EINVAL; 403 return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
637 INIT_LIST_HEAD(&rxq->rx_free); 487 INIT_LIST_HEAD(&rxq->rx_free);
638 INIT_LIST_HEAD(&rxq->rx_used); 488 INIT_LIST_HEAD(&rxq->rx_used);
639 rxq->free_count = 0; 489 rxq->free_count = 0;
640 rxq->used_count = 0;
641 490
642 for (i = 0; i < RX_QUEUE_SIZE; i++) 491 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
643 list_add(&rxq->pool[i].list, &rxq->rx_used); 492 list_add(&rxq->pool[i].list, &rxq->rx_used);
644} 493}
645 494
646static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
647{
648 int i;
649
650 lockdep_assert_held(&rba->lock);
651
652 INIT_LIST_HEAD(&rba->rbd_allocated);
653 INIT_LIST_HEAD(&rba->rbd_empty);
654
655 for (i = 0; i < RX_POOL_SIZE; i++)
656 list_add(&rba->pool[i].list, &rba->rbd_empty);
657}
658
659static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
660{
661 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
662 struct iwl_rb_allocator *rba = &trans_pcie->rba;
663 int i;
664
665 lockdep_assert_held(&rba->lock);
666
667 for (i = 0; i < RX_POOL_SIZE; i++) {
668 if (!rba->pool[i].page)
669 continue;
670 dma_unmap_page(trans->dev, rba->pool[i].page_dma,
671 PAGE_SIZE << trans_pcie->rx_page_order,
672 DMA_FROM_DEVICE);
673 __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
674 rba->pool[i].page = NULL;
675 }
676}
677
678int iwl_pcie_rx_init(struct iwl_trans *trans) 495int iwl_pcie_rx_init(struct iwl_trans *trans)
679{ 496{
680 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 497 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
681 struct iwl_rxq *rxq = &trans_pcie->rxq; 498 struct iwl_rxq *rxq = &trans_pcie->rxq;
682 struct iwl_rb_allocator *rba = &trans_pcie->rba;
683 int i, err; 499 int i, err;
684 500
685 if (!rxq->bd) { 501 if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
687 if (err) 503 if (err)
688 return err; 504 return err;
689 } 505 }
690 if (!rba->alloc_wq)
691 rba->alloc_wq = alloc_workqueue("rb_allocator",
692 WQ_HIGHPRI | WQ_UNBOUND, 1);
693 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
694
695 spin_lock(&rba->lock);
696 atomic_set(&rba->req_pending, 0);
697 atomic_set(&rba->req_ready, 0);
698 /* free all first - we might be reconfigured for a different size */
699 iwl_pcie_rx_free_rba(trans);
700 iwl_pcie_rx_init_rba(rba);
701 spin_unlock(&rba->lock);
702 506
703 spin_lock(&rxq->lock); 507 spin_lock(&rxq->lock);
704 508
509 INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
510
705 /* free all first - we might be reconfigured for a different size */ 511 /* free all first - we might be reconfigured for a different size */
706 iwl_pcie_rxq_free_rbs(trans); 512 iwl_pcie_rxq_free_rbs(trans);
707 iwl_pcie_rx_init_rxb_lists(rxq); 513 iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
716 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 522 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
717 spin_unlock(&rxq->lock); 523 spin_unlock(&rxq->lock);
718 524
719 iwl_pcie_rx_replenish(trans); 525 iwl_pcie_rx_replenish(trans, GFP_KERNEL);
720 526
721 iwl_pcie_rx_hw_init(trans, rxq); 527 iwl_pcie_rx_hw_init(trans, rxq);
722 528
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
731{ 537{
732 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 538 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
733 struct iwl_rxq *rxq = &trans_pcie->rxq; 539 struct iwl_rxq *rxq = &trans_pcie->rxq;
734 struct iwl_rb_allocator *rba = &trans_pcie->rba;
735 540
736 /*if rxq->bd is NULL, it means that nothing has been allocated, 541 /*if rxq->bd is NULL, it means that nothing has been allocated,
737 * exit now */ 542 * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
740 return; 545 return;
741 } 546 }
742 547
743 cancel_work_sync(&rba->rx_alloc); 548 cancel_work_sync(&trans_pcie->rx_replenish);
744 if (rba->alloc_wq) {
745 destroy_workqueue(rba->alloc_wq);
746 rba->alloc_wq = NULL;
747 }
748
749 spin_lock(&rba->lock);
750 iwl_pcie_rx_free_rba(trans);
751 spin_unlock(&rba->lock);
752 549
753 spin_lock(&rxq->lock); 550 spin_lock(&rxq->lock);
754 iwl_pcie_rxq_free_rbs(trans); 551 iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
769 rxq->rb_stts = NULL; 566 rxq->rb_stts = NULL;
770} 567}
771 568
772/*
773 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
774 *
775 * Called when a RBD can be reused. The RBD is transferred to the allocator.
776 * When there are 2 empty RBDs - a request for allocation is posted
777 */
778static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
779 struct iwl_rx_mem_buffer *rxb,
780 struct iwl_rxq *rxq)
781{
782 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
783 struct iwl_rb_allocator *rba = &trans_pcie->rba;
784
785 /* Count the used RBDs */
786 rxq->used_count++;
787
788 /* Move the RBD to the used list, will be moved to allocator in batches
789 * before claiming or posting a request*/
790 list_add_tail(&rxb->list, &rxq->rx_used);
791
792 /* If we have RX_POST_REQ_ALLOC new released rx buffers -
793 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
794 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
795 * after but we still need to post another request.
796 */
797 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
798 /* Move the 2 RBDs to the allocator ownership.
799 Allocator has another 6 from pool for the request completion*/
800 spin_lock(&rba->lock);
801 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
802 spin_unlock(&rba->lock);
803
804 atomic_inc(&rba->req_pending);
805 queue_work(rba->alloc_wq, &rba->rx_alloc);
806 }
807}
808
809static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 569static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
810 struct iwl_rx_mem_buffer *rxb) 570 struct iwl_rx_mem_buffer *rxb)
811{ 571{
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
928 */ 688 */
929 __free_pages(rxb->page, trans_pcie->rx_page_order); 689 __free_pages(rxb->page, trans_pcie->rx_page_order);
930 rxb->page = NULL; 690 rxb->page = NULL;
931 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 691 list_add_tail(&rxb->list, &rxq->rx_used);
932 } else { 692 } else {
933 list_add_tail(&rxb->list, &rxq->rx_free); 693 list_add_tail(&rxb->list, &rxq->rx_free);
934 rxq->free_count++; 694 rxq->free_count++;
935 } 695 }
936 } else 696 } else
937 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 697 list_add_tail(&rxb->list, &rxq->rx_used);
938} 698}
939 699
940/* 700/*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
944{ 704{
945 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 705 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
946 struct iwl_rxq *rxq = &trans_pcie->rxq; 706 struct iwl_rxq *rxq = &trans_pcie->rxq;
947 u32 r, i, j; 707 u32 r, i;
708 u8 fill_rx = 0;
709 u32 count = 8;
710 int total_empty;
948 711
949restart: 712restart:
950 spin_lock(&rxq->lock); 713 spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
957 if (i == r) 720 if (i == r)
958 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 721 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
959 722
723 /* calculate total frames need to be restock after handling RX */
724 total_empty = r - rxq->write_actual;
725 if (total_empty < 0)
726 total_empty += RX_QUEUE_SIZE;
727
728 if (total_empty > (RX_QUEUE_SIZE / 2))
729 fill_rx = 1;
730
960 while (i != r) { 731 while (i != r) {
961 struct iwl_rx_mem_buffer *rxb; 732 struct iwl_rx_mem_buffer *rxb;
962 733
@@ -968,48 +739,29 @@ restart:
968 iwl_pcie_rx_handle_rb(trans, rxb); 739 iwl_pcie_rx_handle_rb(trans, rxb);
969 740
970 i = (i + 1) & RX_QUEUE_MASK; 741 i = (i + 1) & RX_QUEUE_MASK;
971 742 /* If there are a lot of unused frames,
972 /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 743 * restock the Rx queue so ucode wont assert. */
973 * try to claim the pre-allocated buffers from the allocator */ 744 if (fill_rx) {
974 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { 745 count++;
975 struct iwl_rb_allocator *rba = &trans_pcie->rba; 746 if (count >= 8) {
976 struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; 747 rxq->read = i;
977 748 spin_unlock(&rxq->lock);
978 /* Add the remaining 6 empty RBDs for allocator use */ 749 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
979 spin_lock(&rba->lock); 750 count = 0;
980 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 751 goto restart;
981 spin_unlock(&rba->lock);
982
983 /* If not ready - continue, will try to reclaim later.
984 * No need to reschedule work - allocator exits only on
985 * success */
986 if (!iwl_pcie_rx_allocator_get(trans, out)) {
987 /* If success - then RX_CLAIM_REQ_ALLOC
988 * buffers were retrieved and should be added
989 * to free list */
990 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
991 for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
992 list_add_tail(&out[j]->list,
993 &rxq->rx_free);
994 rxq->free_count++;
995 }
996 } 752 }
997 } 753 }
998 /* handle restock for two cases:
999 * - we just pulled buffers from the allocator
1000 * - we have 8+ unstolen pages accumulated */
1001 if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
1002 rxq->read = i;
1003 spin_unlock(&rxq->lock);
1004 iwl_pcie_rxq_restock(trans);
1005 goto restart;
1006 }
1007 } 754 }
1008 755
1009 /* Backtrack one entry */ 756 /* Backtrack one entry */
1010 rxq->read = i; 757 rxq->read = i;
1011 spin_unlock(&rxq->lock); 758 spin_unlock(&rxq->lock);
1012 759
760 if (fill_rx)
761 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
762 else
763 iwl_pcie_rxq_restock(trans);
764
1013 if (trans_pcie->napi.poll) 765 if (trans_pcie->napi.poll)
1014 napi_gro_flush(&trans_pcie->napi, false); 766 napi_gro_flush(&trans_pcie->napi, false);
1015} 767}
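
The rx.c changes above restore the single-pool RX scheme: iwl_pcie_rxq_restock() hands buffers to the hardware and schedules a replenish once free_count drops to RX_LOW_WATERMARK. A minimal model of that watermark check, with invented types standing in for the real queue and work item:

/* Minimal model of the restored flow: restock consumes the free pool and
 * requests a refill once it hits the low watermark. */
#include <stdbool.h>
#include <stdio.h>

#define RX_LOW_WATERMARK 8

struct model_rxq { int free_count; bool replenish_scheduled; };

static void model_restock(struct model_rxq *q, int slots)
{
	while (slots-- > 0 && q->free_count > 0)
		q->free_count--;			/* buffer handed to HW */

	if (q->free_count <= RX_LOW_WATERMARK)		/* pool running low */
		q->replenish_scheduled = true;		/* stands in for schedule_work() */
}

int main(void)
{
	struct model_rxq q = { .free_count = 12, .replenish_scheduled = false };

	model_restock(&q, 6);
	printf("free=%d replenish_scheduled=%d\n",
	       q.free_count, q.replenish_scheduled);
	return 0;
}
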
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
182 182
183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
184{ 184{
185 if (!trans->cfg->apmg_not_supported) 185 if (trans->cfg->apmg_not_supported)
186 return; 186 return;
187 187
188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
478 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) 478 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
479 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 479 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
480 APMG_PCIDEV_STT_VAL_WAKE_ME); 480 APMG_PCIDEV_STT_VAL_WAKE_ME);
481 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 481 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
482 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
483 CSR_RESET_LINK_PWR_MGMT_DISABLED);
482 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 484 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
483 CSR_HW_IF_CONFIG_REG_PREPARE | 485 CSR_HW_IF_CONFIG_REG_PREPARE |
484 CSR_HW_IF_CONFIG_REG_ENABLE_PME); 486 CSR_HW_IF_CONFIG_REG_ENABLE_PME);
487 mdelay(1);
488 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
489 CSR_RESET_LINK_PWR_MGMT_DISABLED);
490 }
485 mdelay(5); 491 mdelay(5);
486 } 492 }
487 493
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
575 if (ret >= 0) 581 if (ret >= 0)
576 return 0; 582 return 0;
577 583
584 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
585 CSR_RESET_LINK_PWR_MGMT_DISABLED);
586 msleep(1);
587
578 for (iter = 0; iter < 10; iter++) { 588 for (iter = 0; iter < 10; iter++) {
579 /* If HW is not ready, prepare the conditions to check again */ 589 /* If HW is not ready, prepare the conditions to check again */
580 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 590 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
582 592
583 do { 593 do {
584 ret = iwl_pcie_set_hw_ready(trans); 594 ret = iwl_pcie_set_hw_ready(trans);
585 if (ret >= 0) 595 if (ret >= 0) {
586 return 0; 596 ret = 0;
597 goto out;
598 }
587 599
588 usleep_range(200, 1000); 600 usleep_range(200, 1000);
589 t += 200; 601 t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
593 605
594 IWL_ERR(trans, "Couldn't prepare the card\n"); 606 IWL_ERR(trans, "Couldn't prepare the card\n");
595 607
608out:
609 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
610 CSR_RESET_LINK_PWR_MGMT_DISABLED);
611
596 return ret; 612 return ret;
597} 613}
598 614
@@ -2459,7 +2475,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2459 struct iwl_trans_pcie *trans_pcie; 2475 struct iwl_trans_pcie *trans_pcie;
2460 struct iwl_trans *trans; 2476 struct iwl_trans *trans;
2461 u16 pci_cmd; 2477 u16 pci_cmd;
2462 int err; 2478 int ret;
2463 2479
2464 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2480 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
2465 &pdev->dev, cfg, &trans_ops_pcie, 0); 2481 &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2490,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2474 spin_lock_init(&trans_pcie->ref_lock); 2490 spin_lock_init(&trans_pcie->ref_lock);
2475 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2491 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2476 2492
2477 err = pci_enable_device(pdev); 2493 ret = pci_enable_device(pdev);
2478 if (err) 2494 if (ret)
2479 goto out_no_pci; 2495 goto out_no_pci;
2480 2496
2481 if (!cfg->base_params->pcie_l1_allowed) { 2497 if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2507,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2491 2507
2492 pci_set_master(pdev); 2508 pci_set_master(pdev);
2493 2509
2494 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2510 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2495 if (!err) 2511 if (!ret)
2496 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2512 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2497 if (err) { 2513 if (ret) {
2498 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2514 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2499 if (!err) 2515 if (!ret)
2500 err = pci_set_consistent_dma_mask(pdev, 2516 ret = pci_set_consistent_dma_mask(pdev,
2501 DMA_BIT_MASK(32)); 2517 DMA_BIT_MASK(32));
2502 /* both attempts failed: */ 2518 /* both attempts failed: */
2503 if (err) { 2519 if (ret) {
2504 dev_err(&pdev->dev, "No suitable DMA available\n"); 2520 dev_err(&pdev->dev, "No suitable DMA available\n");
2505 goto out_pci_disable_device; 2521 goto out_pci_disable_device;
2506 } 2522 }
2507 } 2523 }
2508 2524
2509 err = pci_request_regions(pdev, DRV_NAME); 2525 ret = pci_request_regions(pdev, DRV_NAME);
2510 if (err) { 2526 if (ret) {
2511 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2527 dev_err(&pdev->dev, "pci_request_regions failed\n");
2512 goto out_pci_disable_device; 2528 goto out_pci_disable_device;
2513 } 2529 }
@@ -2515,7 +2531,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2515 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); 2531 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2516 if (!trans_pcie->hw_base) { 2532 if (!trans_pcie->hw_base) {
2517 dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); 2533 dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
2518 err = -ENODEV; 2534 ret = -ENODEV;
2519 goto out_pci_release_regions; 2535 goto out_pci_release_regions;
2520 } 2536 }
2521 2537
@@ -2527,9 +2543,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2527 trans_pcie->pci_dev = pdev; 2543 trans_pcie->pci_dev = pdev;
2528 iwl_disable_interrupts(trans); 2544 iwl_disable_interrupts(trans);
2529 2545
2530 err = pci_enable_msi(pdev); 2546 ret = pci_enable_msi(pdev);
2531 if (err) { 2547 if (ret) {
2532 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 2548 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
2533 /* enable rfkill interrupt: hw bug w/a */ 2549 /* enable rfkill interrupt: hw bug w/a */
2534 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2550 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2535 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 2551 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2563,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2547 */ 2563 */
2548 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2564 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
2549 unsigned long flags; 2565 unsigned long flags;
2550 int ret;
2551 2566
2552 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2567 trans->hw_rev = (trans->hw_rev & 0xfff0) |
2553 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2568 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
2554 2569
2570 ret = iwl_pcie_prepare_card_hw(trans);
2571 if (ret) {
2572 IWL_WARN(trans, "Exit HW not ready\n");
2573 goto out_pci_disable_msi;
2574 }
2575
2555 /* 2576 /*
2556 * in-order to recognize C step driver should read chip version 2577 * in-order to recognize C step driver should read chip version
2557 * id located at the AUX bus MISC address space. 2578 * id located at the AUX bus MISC address space.
@@ -2591,13 +2612,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2591 /* Initialize the wait queue for commands */ 2612 /* Initialize the wait queue for commands */
2592 init_waitqueue_head(&trans_pcie->wait_command_queue); 2613 init_waitqueue_head(&trans_pcie->wait_command_queue);
2593 2614
2594 if (iwl_pcie_alloc_ict(trans)) 2615 ret = iwl_pcie_alloc_ict(trans);
2616 if (ret)
2595 goto out_pci_disable_msi; 2617 goto out_pci_disable_msi;
2596 2618
2597 err = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2619 ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
2598 iwl_pcie_irq_handler, 2620 iwl_pcie_irq_handler,
2599 IRQF_SHARED, DRV_NAME, trans); 2621 IRQF_SHARED, DRV_NAME, trans);
2600 if (err) { 2622 if (ret) {
2601 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 2623 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
2602 goto out_free_ict; 2624 goto out_free_ict;
2603 } 2625 }
@@ -2617,5 +2639,5 @@ out_pci_disable_device:
2617 pci_disable_device(pdev); 2639 pci_disable_device(pdev);
2618out_no_pci: 2640out_no_pci:
2619 iwl_trans_free(trans); 2641 iwl_trans_free(trans);
2620 return ERR_PTR(err); 2642 return ERR_PTR(ret);
2621} 2643}
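For context on the hunk above: the 36-bit/32-bit DMA-mask handling is the usual PCI probe fallback — try the wide mask first, drop to 32-bit, and only then give up. A minimal sketch under those assumptions (the function name and message are illustrative, not part of the patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Sketch: prefer a 36-bit DMA mask, fall back to 32-bit before failing. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        int ret;

        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
        if (!ret)
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
        if (ret) {
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!ret)
                        ret = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                if (ret)
                        dev_err(&pdev->dev, "No suitable DMA available\n");
        }

        return ret;
}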
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1875 1875
1876 /* start timer if queue currently empty */ 1876 /* start timer if queue currently empty */
1877 if (q->read_ptr == q->write_ptr) { 1877 if (q->read_ptr == q->write_ptr) {
1878 if (txq->wd_timeout) 1878 if (txq->wd_timeout) {
1879 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1879 /*
1880 * If the TXQ is active, then set the timer, if not,
1881 * set the timer in remainder so that the timer will
1882 * be armed with the right value when the station will
1883 * wake up.
1884 */
1885 if (!txq->frozen)
1886 mod_timer(&txq->stuck_timer,
1887 jiffies + txq->wd_timeout);
1888 else
1889 txq->frozen_expiry_remainder = txq->wd_timeout;
1890 }
1880 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); 1891 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
1881 iwl_trans_pcie_ref(trans); 1892 iwl_trans_pcie_ref(trans);
1882 } 1893 }
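The tx.c change arms the stuck-queue watchdog only while the queue is live and otherwise stashes the timeout so it can be re-armed when the queue is thawed. A condensed sketch of that decision (the example_txq type below is a placeholder, not the driver's struct):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_txq {
        struct timer_list stuck_timer;
        unsigned long wd_timeout;               /* in jiffies */
        unsigned long frozen_expiry_remainder;
        bool frozen;
};

/* Sketch: arm the watchdog only if the queue is active; otherwise keep
 * the timeout for later re-arming. */
static void example_arm_watchdog(struct example_txq *txq)
{
        if (!txq->wd_timeout)
                return;

        if (!txq->frozen)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
        else
                txq->frozen_expiry_remainder = txq->wd_timeout;
}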
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
172 (struct rsi_91x_sdiodev *)adapter->rsi_dev; 172 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
173 u32 len; 173 u32 len;
174 u32 num_blocks; 174 u32 num_blocks;
175 const u8 *fw;
175 const struct firmware *fw_entry = NULL; 176 const struct firmware *fw_entry = NULL;
176 u32 block_size = dev->tx_blk_size; 177 u32 block_size = dev->tx_blk_size;
177 int status = 0; 178 int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
200 return status; 201 return status;
201 } 202 }
202 203
204 /* Copy firmware into DMA-accessible memory */
205 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
206 if (!fw)
207 return -ENOMEM;
203 len = fw_entry->size; 208 len = fw_entry->size;
204 209
205 if (len % 4) 210 if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
210 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); 215 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
211 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 216 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
212 217
213 status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); 218 status = rsi_copy_to_card(common, fw, len, num_blocks);
219 kfree(fw);
214 release_firmware(fw_entry); 220 release_firmware(fw_entry);
215 return status; 221 return status;
216} 222}
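The rsi change copies the firmware image out of the read-only firmware cache with kmemdup() so the transfer path gets a writable, DMA-safe buffer. A minimal sketch of the pattern, releasing the firmware on every path (the actual transfer step is elided; names are illustrative):

#include <linux/firmware.h>
#include <linux/slab.h>

/* Sketch: duplicate firmware data into kmalloc'd memory before DMA. */
static int example_load_firmware(struct device *dev, const char *name)
{
        const struct firmware *fw_entry;
        u8 *fw;
        int status;

        status = request_firmware(&fw_entry, name, dev);
        if (status)
                return status;

        fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
        if (!fw) {
                release_firmware(fw_entry);
                return -ENOMEM;
        }

        /* ... hand "fw" to the DMA/transfer routine here ... */

        kfree(fw);
        release_firmware(fw_entry);
        return 0;
}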
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
146 return status; 146 return status;
147 } 147 }
148 148
149 /* Copy firmware into DMA-accessible memory */
149 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); 150 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
151 if (!fw)
152 return -ENOMEM;
150 len = fw_entry->size; 153 len = fw_entry->size;
151 154
152 if (len % 4) 155 if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
158 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 161 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
159 162
160 status = rsi_copy_to_card(common, fw, len, num_blocks); 163 status = rsi_copy_to_card(common, fw, len, num_blocks);
164 kfree(fw);
161 release_firmware(fw_entry); 165 release_firmware(fw_entry);
162 return status; 166 return status;
163} 167}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
1015{ 1015{
1016 struct rtl_priv *rtlpriv = rtl_priv(hw); 1016 struct rtl_priv *rtlpriv = rtl_priv(hw);
1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif); 1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1018 struct rtl_tcb_desc tcb_desc;
1018 1019
1019 if (skb) 1020 if (skb) {
1020 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); 1021 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1022 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
1023 }
1021} 1024}
1022 1025
1023static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, 1026static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
385module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); 385module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
386module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); 386module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); 387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
388module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
388module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, 389module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
389 bool, 0444); 390 bool, 0444);
390MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 391MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
61void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) 61void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
62{ 62{
63 atomic_dec(&queue->inflight_packets); 63 atomic_dec(&queue->inflight_packets);
64
65 /* Wake the dealloc thread _after_ decrementing inflight_packets so
66 * that if kthread_stop() has already been called, the dealloc thread
67 * does not wait forever with nothing to wake it.
68 */
69 wake_up(&queue->dealloc_wq);
64} 70}
65 71
66int xenvif_schedulable(struct xenvif *vif) 72int xenvif_schedulable(struct xenvif *vif)
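The interface.c hunk pairs the atomic decrement with a wake_up() so a thread sleeping in wait_event() on the in-flight count cannot miss the final decrement. A small sketch of that producer/consumer pairing (type and names are placeholders):

#include <linux/atomic.h>
#include <linux/wait.h>

struct example_queue {
        atomic_t inflight;
        wait_queue_head_t dealloc_wq;   /* init_waitqueue_head() at setup */
};

/* Completion side: decrement first, then wake any waiter. */
static void example_complete(struct example_queue *q)
{
        atomic_dec(&q->inflight);
        wake_up(&q->dealloc_wq);
}

/* Teardown side: sleeps until every in-flight packet has completed. */
static void example_wait_idle(struct example_queue *q)
{
        wait_event(q->dealloc_wq, atomic_read(&q->inflight) == 0);
}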
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
810static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, 810static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
811 struct sk_buff *skb, 811 struct sk_buff *skb,
812 struct xen_netif_tx_request *txp, 812 struct xen_netif_tx_request *txp,
813 struct gnttab_map_grant_ref *gop) 813 struct gnttab_map_grant_ref *gop,
814 unsigned int frag_overflow,
815 struct sk_buff *nskb)
814{ 816{
815 struct skb_shared_info *shinfo = skb_shinfo(skb); 817 struct skb_shared_info *shinfo = skb_shinfo(skb);
816 skb_frag_t *frags = shinfo->frags; 818 skb_frag_t *frags = shinfo->frags;
817 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 819 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
818 int start; 820 int start;
819 pending_ring_idx_t index; 821 pending_ring_idx_t index;
820 unsigned int nr_slots, frag_overflow = 0; 822 unsigned int nr_slots;
821 823
822 /* At this point shinfo->nr_frags is in fact the number of
823 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
824 */
825 if (shinfo->nr_frags > MAX_SKB_FRAGS) {
826 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
827 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
828 shinfo->nr_frags = MAX_SKB_FRAGS;
829 }
830 nr_slots = shinfo->nr_frags; 824 nr_slots = shinfo->nr_frags;
831 825
832 /* Skip first skb fragment if it is on same page as header fragment. */ 826 /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
841 } 835 }
842 836
843 if (frag_overflow) { 837 if (frag_overflow) {
844 struct sk_buff *nskb = xenvif_alloc_skb(0);
845 if (unlikely(nskb == NULL)) {
846 if (net_ratelimit())
847 netdev_err(queue->vif->dev,
848 "Can't allocate the frag_list skb.\n");
849 return NULL;
850 }
851 838
852 shinfo = skb_shinfo(nskb); 839 shinfo = skb_shinfo(nskb);
853 frags = shinfo->frags; 840 frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1175 unsigned *copy_ops, 1162 unsigned *copy_ops,
1176 unsigned *map_ops) 1163 unsigned *map_ops)
1177{ 1164{
1178 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; 1165 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1179 struct sk_buff *skb; 1166 struct sk_buff *skb, *nskb;
1180 int ret; 1167 int ret;
1168 unsigned int frag_overflow;
1181 1169
1182 while (skb_queue_len(&queue->tx_queue) < budget) { 1170 while (skb_queue_len(&queue->tx_queue) < budget) {
1183 struct xen_netif_tx_request txreq; 1171 struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1265 break; 1253 break;
1266 } 1254 }
1267 1255
1256 skb_shinfo(skb)->nr_frags = ret;
1257 if (data_len < txreq.size)
1258 skb_shinfo(skb)->nr_frags++;
1259 /* At this point shinfo->nr_frags is in fact the number of
1260 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1261 */
1262 frag_overflow = 0;
1263 nskb = NULL;
1264 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1265 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1266 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1267 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1268 nskb = xenvif_alloc_skb(0);
1269 if (unlikely(nskb == NULL)) {
1270 kfree_skb(skb);
1271 xenvif_tx_err(queue, &txreq, idx);
1272 if (net_ratelimit())
1273 netdev_err(queue->vif->dev,
1274 "Can't allocate the frag_list skb.\n");
1275 break;
1276 }
1277 }
1278
1268 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 1279 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1269 struct xen_netif_extra_info *gso; 1280 struct xen_netif_extra_info *gso;
1270 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1281 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1272 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { 1283 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1273 /* Failure in xenvif_set_skb_gso is fatal. */ 1284 /* Failure in xenvif_set_skb_gso is fatal. */
1274 kfree_skb(skb); 1285 kfree_skb(skb);
1286 kfree_skb(nskb);
1275 break; 1287 break;
1276 } 1288 }
1277 } 1289 }
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1294 1306
1295 (*copy_ops)++; 1307 (*copy_ops)++;
1296 1308
1297 skb_shinfo(skb)->nr_frags = ret;
1298 if (data_len < txreq.size) { 1309 if (data_len < txreq.size) {
1299 skb_shinfo(skb)->nr_frags++;
1300 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1310 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1301 pending_idx); 1311 pending_idx);
1302 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); 1312 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1310 1320
1311 queue->pending_cons++; 1321 queue->pending_cons++;
1312 1322
1313 request_gop = xenvif_get_requests(queue, skb, txfrags, gop); 1323 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1314 if (request_gop == NULL) { 1324 frag_overflow, nskb);
1315 kfree_skb(skb);
1316 xenvif_tx_err(queue, &txreq, idx);
1317 break;
1318 }
1319 gop = request_gop;
1320 1325
1321 __skb_queue_tail(&queue->tx_queue, skb); 1326 __skb_queue_tail(&queue->tx_queue, skb);
1322 1327
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1536 smp_wmb(); 1541 smp_wmb();
1537 queue->dealloc_prod++; 1542 queue->dealloc_prod++;
1538 } while (ubuf); 1543 } while (ubuf);
1539 wake_up(&queue->dealloc_wq);
1540 spin_unlock_irqrestore(&queue->callback_lock, flags); 1544 spin_unlock_irqrestore(&queue->callback_lock, flags);
1541 1545
1542 if (likely(zerocopy_success)) 1546 if (likely(zerocopy_success))
@@ -1566,13 +1570,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1566 smp_rmb(); 1570 smp_rmb();
1567 1571
1568 while (dc != dp) { 1572 while (dc != dp) {
1569 BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); 1573 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1570 pending_idx = 1574 pending_idx =
1571 queue->dealloc_ring[pending_index(dc++)]; 1575 queue->dealloc_ring[pending_index(dc++)];
1572 1576
1573 pending_idx_release[gop-queue->tx_unmap_ops] = 1577 pending_idx_release[gop - queue->tx_unmap_ops] =
1574 pending_idx; 1578 pending_idx;
1575 queue->pages_to_unmap[gop-queue->tx_unmap_ops] = 1579 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1576 queue->mmap_pages[pending_idx]; 1580 queue->mmap_pages[pending_idx];
1577 gnttab_set_unmap_op(gop, 1581 gnttab_set_unmap_op(gop,
1578 idx_to_kaddr(queue, pending_idx), 1582 idx_to_kaddr(queue, pending_idx),
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
114 ntb->dev.bus = &ntb_bus; 114 ntb->dev.bus = &ntb_bus;
115 ntb->dev.parent = &ntb->pdev->dev; 115 ntb->dev.parent = &ntb->pdev->dev;
116 ntb->dev.release = ntb_dev_release; 116 ntb->dev.release = ntb_dev_release;
117 dev_set_name(&ntb->dev, pci_name(ntb->pdev)); 117 dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
118 118
119 ntb->ctx = NULL; 119 ntb->ctx = NULL;
120 ntb->ctx_ops = NULL; 120 ntb->ctx_ops = NULL;
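The ntb.c one-liner is the usual format-string hardening: a name that comes from elsewhere goes through "%s" rather than being used as the format itself, so a stray '%' in it is never interpreted. Sketched:

#include <linux/device.h>

/* Sketch: never pass an externally supplied string as the format. */
static int example_name_device(struct device *dev, const char *name)
{
        return dev_set_name(dev, "%s", name);
}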
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
142 142
143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
144 void *data, int len); 144 void *data, int len);
145 struct list_head rx_post_q;
145 struct list_head rx_pend_q; 146 struct list_head rx_pend_q;
146 struct list_head rx_free_q; 147 struct list_head rx_free_q;
147 spinlock_t ntb_rx_pend_q_lock; 148 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
148 spinlock_t ntb_rx_free_q_lock; 149 spinlock_t ntb_rx_q_lock;
149 void *rx_buff; 150 void *rx_buff;
150 unsigned int rx_index; 151 unsigned int rx_index;
151 unsigned int rx_max_entry; 152 unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
211 bool link_is_up; 212 bool link_is_up;
212 struct delayed_work link_work; 213 struct delayed_work link_work;
213 struct work_struct link_cleanup; 214 struct work_struct link_cleanup;
215
216 struct dentry *debugfs_node_dir;
214}; 217};
215 218
216enum { 219enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
436 char *buf; 439 char *buf;
437 ssize_t ret, out_offset, out_count; 440 ssize_t ret, out_offset, out_count;
438 441
442 qp = filp->private_data;
443
444 if (!qp || !qp->link_is_up)
445 return 0;
446
439 out_count = 1000; 447 out_count = 1000;
440 448
441 buf = kmalloc(out_count, GFP_KERNEL); 449 buf = kmalloc(out_count, GFP_KERNEL);
442 if (!buf) 450 if (!buf)
443 return -ENOMEM; 451 return -ENOMEM;
444 452
445 qp = filp->private_data;
446 out_offset = 0; 453 out_offset = 0;
447 out_offset += snprintf(buf + out_offset, out_count - out_offset, 454 out_offset += snprintf(buf + out_offset, out_count - out_offset,
448 "NTB QP stats\n"); 455 "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
534 return entry; 541 return entry;
535} 542}
536 543
544static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
545 struct list_head *list,
546 struct list_head *to_list)
547{
548 struct ntb_queue_entry *entry;
549 unsigned long flags;
550
551 spin_lock_irqsave(lock, flags);
552
553 if (list_empty(list)) {
554 entry = NULL;
555 } else {
556 entry = list_first_entry(list, struct ntb_queue_entry, entry);
557 list_move_tail(&entry->entry, to_list);
558 }
559
560 spin_unlock_irqrestore(lock, flags);
561
562 return entry;
563}
564
537static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, 565static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
538 unsigned int qp_num) 566 unsigned int qp_num)
539{ 567{
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
601} 629}
602 630
603static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, 631static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
604 unsigned int size) 632 resource_size_t size)
605{ 633{
606 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 634 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
607 struct pci_dev *pdev = nt->ndev->pdev; 635 struct pci_dev *pdev = nt->ndev->pdev;
608 unsigned int xlat_size, buff_size; 636 size_t xlat_size, buff_size;
609 int rc; 637 int rc;
610 638
639 if (!size)
640 return -EINVAL;
641
611 xlat_size = round_up(size, mw->xlat_align_size); 642 xlat_size = round_up(size, mw->xlat_align_size);
612 buff_size = round_up(size, mw->xlat_align); 643 buff_size = round_up(size, mw->xlat_align);
613 644
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
627 if (!mw->virt_addr) { 658 if (!mw->virt_addr) {
628 mw->xlat_size = 0; 659 mw->xlat_size = 0;
629 mw->buff_size = 0; 660 mw->buff_size = 0;
630 dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n", 661 dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
631 buff_size); 662 buff_size);
632 return -ENOMEM; 663 return -ENOMEM;
633 } 664 }
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
867 898
868 if (qp->event_handler) 899 if (qp->event_handler)
869 qp->event_handler(qp->cb_data, qp->link_is_up); 900 qp->event_handler(qp->cb_data, qp->link_is_up);
901
902 tasklet_schedule(&qp->rxc_db_work);
870 } else if (nt->link_is_up) 903 } else if (nt->link_is_up)
871 schedule_delayed_work(&qp->link_work, 904 schedule_delayed_work(&qp->link_work,
872 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 905 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
923 qp->tx_max_frame = min(transport_mtu, tx_size / 2); 956 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
924 qp->tx_max_entry = tx_size / qp->tx_max_frame; 957 qp->tx_max_entry = tx_size / qp->tx_max_frame;
925 958
926 if (nt_debugfs_dir) { 959 if (nt->debugfs_node_dir) {
927 char debugfs_name[4]; 960 char debugfs_name[4];
928 961
929 snprintf(debugfs_name, 4, "qp%d", qp_num); 962 snprintf(debugfs_name, 4, "qp%d", qp_num);
930 qp->debugfs_dir = debugfs_create_dir(debugfs_name, 963 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
931 nt_debugfs_dir); 964 nt->debugfs_node_dir);
932 965
933 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, 966 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
934 qp->debugfs_dir, qp, 967 qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
941 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 974 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
942 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); 975 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
943 976
944 spin_lock_init(&qp->ntb_rx_pend_q_lock); 977 spin_lock_init(&qp->ntb_rx_q_lock);
945 spin_lock_init(&qp->ntb_rx_free_q_lock);
946 spin_lock_init(&qp->ntb_tx_free_q_lock); 978 spin_lock_init(&qp->ntb_tx_free_q_lock);
947 979
980 INIT_LIST_HEAD(&qp->rx_post_q);
948 INIT_LIST_HEAD(&qp->rx_pend_q); 981 INIT_LIST_HEAD(&qp->rx_pend_q);
949 INIT_LIST_HEAD(&qp->rx_free_q); 982 INIT_LIST_HEAD(&qp->rx_free_q);
950 INIT_LIST_HEAD(&qp->tx_free_q); 983 INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1031 goto err2; 1064 goto err2;
1032 } 1065 }
1033 1066
1067 if (nt_debugfs_dir) {
1068 nt->debugfs_node_dir =
1069 debugfs_create_dir(pci_name(ndev->pdev),
1070 nt_debugfs_dir);
1071 }
1072
1034 for (i = 0; i < qp_count; i++) { 1073 for (i = 0; i < qp_count; i++) {
1035 rc = ntb_transport_init_queue(nt, i); 1074 rc = ntb_transport_init_queue(nt, i);
1036 if (rc) 1075 if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
1107 kfree(nt); 1146 kfree(nt);
1108} 1147}
1109 1148
1110static void ntb_rx_copy_callback(void *data) 1149static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1111{ 1150{
1112 struct ntb_queue_entry *entry = data; 1151 struct ntb_queue_entry *entry;
1113 struct ntb_transport_qp *qp = entry->qp; 1152 void *cb_data;
1114 void *cb_data = entry->cb_data; 1153 unsigned int len;
1115 unsigned int len = entry->len; 1154 unsigned long irqflags;
1116 struct ntb_payload_header *hdr = entry->rx_hdr; 1155
1156 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1157
1158 while (!list_empty(&qp->rx_post_q)) {
1159 entry = list_first_entry(&qp->rx_post_q,
1160 struct ntb_queue_entry, entry);
1161 if (!(entry->flags & DESC_DONE_FLAG))
1162 break;
1163
1164 entry->rx_hdr->flags = 0;
1165 iowrite32(entry->index, &qp->rx_info->entry);
1117 1166
1118 hdr->flags = 0; 1167 cb_data = entry->cb_data;
1168 len = entry->len;
1119 1169
1120 iowrite32(entry->index, &qp->rx_info->entry); 1170 list_move_tail(&entry->entry, &qp->rx_free_q);
1121 1171
1122 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1172 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1123 1173
1124 if (qp->rx_handler && qp->client_ready) 1174 if (qp->rx_handler && qp->client_ready)
1125 qp->rx_handler(qp, qp->cb_data, cb_data, len); 1175 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1176
1177 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1178 }
1179
1180 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1181}
1182
1183static void ntb_rx_copy_callback(void *data)
1184{
1185 struct ntb_queue_entry *entry = data;
1186
1187 entry->flags |= DESC_DONE_FLAG;
1188
1189 ntb_complete_rxc(entry->qp);
1126} 1190}
1127 1191
1128static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) 1192static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1138 ntb_rx_copy_callback(entry); 1202 ntb_rx_copy_callback(entry);
1139} 1203}
1140 1204
1141static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, 1205static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1142 size_t len)
1143{ 1206{
1144 struct dma_async_tx_descriptor *txd; 1207 struct dma_async_tx_descriptor *txd;
1145 struct ntb_transport_qp *qp = entry->qp; 1208 struct ntb_transport_qp *qp = entry->qp;
1146 struct dma_chan *chan = qp->dma_chan; 1209 struct dma_chan *chan = qp->dma_chan;
1147 struct dma_device *device; 1210 struct dma_device *device;
1148 size_t pay_off, buff_off; 1211 size_t pay_off, buff_off, len;
1149 struct dmaengine_unmap_data *unmap; 1212 struct dmaengine_unmap_data *unmap;
1150 dma_cookie_t cookie; 1213 dma_cookie_t cookie;
1151 void *buf = entry->buf; 1214 void *buf = entry->buf;
1152 1215
1153 entry->len = len; 1216 len = entry->len;
1154 1217
1155 if (!chan) 1218 if (!chan)
1156 goto err; 1219 goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1226 struct ntb_payload_header *hdr; 1289 struct ntb_payload_header *hdr;
1227 struct ntb_queue_entry *entry; 1290 struct ntb_queue_entry *entry;
1228 void *offset; 1291 void *offset;
1229 int rc;
1230 1292
1231 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; 1293 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1232 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); 1294 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1255 return -EIO; 1317 return -EIO;
1256 } 1318 }
1257 1319
1258 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1320 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1259 if (!entry) { 1321 if (!entry) {
1260 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); 1322 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1261 qp->rx_err_no_buf++; 1323 qp->rx_err_no_buf++;
1262 1324 return -EAGAIN;
1263 rc = -ENOMEM;
1264 goto err;
1265 } 1325 }
1266 1326
1327 entry->rx_hdr = hdr;
1328 entry->index = qp->rx_index;
1329
1267 if (hdr->len > entry->len) { 1330 if (hdr->len > entry->len) {
1268 dev_dbg(&qp->ndev->pdev->dev, 1331 dev_dbg(&qp->ndev->pdev->dev,
1269 "receive buffer overflow! Wanted %d got %d\n", 1332 "receive buffer overflow! Wanted %d got %d\n",
1270 hdr->len, entry->len); 1333 hdr->len, entry->len);
1271 qp->rx_err_oflow++; 1334 qp->rx_err_oflow++;
1272 1335
1273 rc = -EIO; 1336 entry->len = -EIO;
1274 goto err; 1337 entry->flags |= DESC_DONE_FLAG;
1275 }
1276 1338
1277 dev_dbg(&qp->ndev->pdev->dev, 1339 ntb_complete_rxc(qp);
1278 "RX OK index %u ver %u size %d into buf size %d\n", 1340 } else {
1279 qp->rx_index, hdr->ver, hdr->len, entry->len); 1341 dev_dbg(&qp->ndev->pdev->dev,
1342 "RX OK index %u ver %u size %d into buf size %d\n",
1343 qp->rx_index, hdr->ver, hdr->len, entry->len);
1280 1344
1281 qp->rx_bytes += hdr->len; 1345 qp->rx_bytes += hdr->len;
1282 qp->rx_pkts++; 1346 qp->rx_pkts++;
1283 1347
1284 entry->index = qp->rx_index; 1348 entry->len = hdr->len;
1285 entry->rx_hdr = hdr;
1286 1349
1287 ntb_async_rx(entry, offset, hdr->len); 1350 ntb_async_rx(entry, offset);
1351 }
1288 1352
1289 qp->rx_index++; 1353 qp->rx_index++;
1290 qp->rx_index %= qp->rx_max_entry; 1354 qp->rx_index %= qp->rx_max_entry;
1291 1355
1292 return 0; 1356 return 0;
1293
1294err:
1295 /* FIXME: if this syncrhonous update of the rx_index gets ahead of
1296 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
1297 * scenarios:
1298 *
1299 * 1) The peer might miss this update, but observe the update
1300 * from the memcpy completion callback. In this case, the buffer will
1301 * not be freed on the peer to be reused for a different packet. The
1302 * successful rx of a later packet would clear the condition, but the
1303 * condition could persist if several rx fail in a row.
1304 *
1305 * 2) The peer may observe this update before the asyncrhonous copy of
1306 * prior packets is completed. The peer may overwrite the buffers of
1307 * the prior packets before they are copied.
1308 *
1309 * 3) Both: the peer may observe the update, and then observe the index
1310 * decrement by the asynchronous completion callback. Who knows what
1311 * badness that will cause.
1312 */
1313 hdr->flags = 0;
1314 iowrite32(qp->rx_index, &qp->rx_info->entry);
1315
1316 return rc;
1317} 1357}
1318 1358
1319static void ntb_transport_rxc_db(unsigned long data) 1359static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
1333 break; 1373 break;
1334 } 1374 }
1335 1375
1336 if (qp->dma_chan) 1376 if (i && qp->dma_chan)
1337 dma_async_issue_pending(qp->dma_chan); 1377 dma_async_issue_pending(qp->dma_chan);
1338 1378
1339 if (i == qp->rx_max_entry) { 1379 if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1609 goto err1; 1649 goto err1;
1610 1650
1611 entry->qp = qp; 1651 entry->qp = qp;
1612 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, 1652 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
1613 &qp->rx_free_q); 1653 &qp->rx_free_q);
1614 } 1654 }
1615 1655
@@ -1634,7 +1674,7 @@ err2:
1634 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1674 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1635 kfree(entry); 1675 kfree(entry);
1636err1: 1676err1:
1637 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1677 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1638 kfree(entry); 1678 kfree(entry);
1639 if (qp->dma_chan) 1679 if (qp->dma_chan)
1640 dma_release_channel(qp->dma_chan); 1680 dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1652 */ 1692 */
1653void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1693void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1654{ 1694{
1655 struct ntb_transport_ctx *nt = qp->transport;
1656 struct pci_dev *pdev; 1695 struct pci_dev *pdev;
1657 struct ntb_queue_entry *entry; 1696 struct ntb_queue_entry *entry;
1658 u64 qp_bit; 1697 u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1689 qp->tx_handler = NULL; 1728 qp->tx_handler = NULL;
1690 qp->event_handler = NULL; 1729 qp->event_handler = NULL;
1691 1730
1692 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1731 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1693 kfree(entry); 1732 kfree(entry);
1694 1733
1695 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { 1734 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
1696 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); 1735 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
1736 kfree(entry);
1737 }
1738
1739 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
1740 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
1697 kfree(entry); 1741 kfree(entry);
1698 } 1742 }
1699 1743
1700 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1744 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1701 kfree(entry); 1745 kfree(entry);
1702 1746
1703 nt->qp_bitmap_free |= qp_bit; 1747 qp->transport->qp_bitmap_free |= qp_bit;
1704 1748
1705 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); 1749 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1706} 1750}
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1724 if (!qp || qp->client_ready) 1768 if (!qp || qp->client_ready)
1725 return NULL; 1769 return NULL;
1726 1770
1727 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1771 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
1728 if (!entry) 1772 if (!entry)
1729 return NULL; 1773 return NULL;
1730 1774
1731 buf = entry->cb_data; 1775 buf = entry->cb_data;
1732 *len = entry->len; 1776 *len = entry->len;
1733 1777
1734 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1778 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
1735 1779
1736 return buf; 1780 return buf;
1737} 1781}
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1757 if (!qp) 1801 if (!qp)
1758 return -EINVAL; 1802 return -EINVAL;
1759 1803
1760 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); 1804 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
1761 if (!entry) 1805 if (!entry)
1762 return -ENOMEM; 1806 return -ENOMEM;
1763 1807
1764 entry->cb_data = cb; 1808 entry->cb_data = cb;
1765 entry->buf = data; 1809 entry->buf = data;
1766 entry->len = len; 1810 entry->len = len;
1811 entry->flags = 0;
1812
1813 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
1767 1814
1768 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); 1815 tasklet_schedule(&qp->rxc_db_work);
1769 1816
1770 return 0; 1817 return 0;
1771} 1818}
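The ntb_transport rework retires RX entries strictly in posting order: the (possibly asynchronous) copy callback only marks an entry done, and a single drain loop under one lock acknowledges and recycles entries until it hits the first unfinished one. A condensed sketch of that shape (types, flag and the acknowledge step are simplified placeholders):

#include <linux/list.h>
#include <linux/spinlock.h>

#define EXAMPLE_DONE_FLAG 1

struct example_entry {
        struct list_head entry;
        unsigned int flags;
};

/* Drain completed entries in posting order; stop at the first entry
 * whose copy has not finished yet. */
static void example_complete_rx(spinlock_t *lock, struct list_head *post_q,
                                struct list_head *free_q)
{
        struct example_entry *e;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        while (!list_empty(post_q)) {
                e = list_first_entry(post_q, struct example_entry, entry);
                if (!(e->flags & EXAMPLE_DONE_FLAG))
                        break;
                /* ... acknowledge the hardware slot for "e" here ... */
                list_move_tail(&e->entry, free_q);
        }
        spin_unlock_irqrestore(lock, flags);
}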
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8eb22c0ca7ce..7e2c43f701bc 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -535,8 +535,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
535 __func__, dimm_name, cmd_name, i); 535 __func__, dimm_name, cmd_name, i);
536 return -ENXIO; 536 return -ENXIO;
537 } 537 }
538 if (!access_ok(VERIFY_READ, p + in_len, in_size))
539 return -EFAULT;
540 if (in_len < sizeof(in_env)) 538 if (in_len < sizeof(in_env))
541 copy = min_t(u32, sizeof(in_env) - in_len, in_size); 539 copy = min_t(u32, sizeof(in_env) - in_len, in_size);
542 else 540 else
@@ -557,8 +555,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
557 __func__, dimm_name, cmd_name, i); 555 __func__, dimm_name, cmd_name, i);
558 return -EFAULT; 556 return -EFAULT;
559 } 557 }
560 if (!access_ok(VERIFY_WRITE, p + in_len + out_len, out_size))
561 return -EFAULT;
562 if (out_len < sizeof(out_env)) 558 if (out_len < sizeof(out_env))
563 copy = min_t(u32, sizeof(out_env) - out_len, out_size); 559 copy = min_t(u32, sizeof(out_env) - out_len, out_size);
564 else 560 else
@@ -570,9 +566,6 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
570 } 566 }
571 567
572 buf_len = out_len + in_len; 568 buf_len = out_len + in_len;
573 if (!access_ok(VERIFY_WRITE, p, sizeof(buf_len)))
574 return -EFAULT;
575
576 if (buf_len > ND_IOCTL_MAX_BUFLEN) { 569 if (buf_len > ND_IOCTL_MAX_BUFLEN) {
577 dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__, 570 dev_dbg(dev, "%s:%s cmd: %s buf_len: %zu > %d\n", __func__,
578 dimm_name, cmd_name, buf_len, 571 dimm_name, cmd_name, buf_len,
@@ -706,8 +699,10 @@ int __init nvdimm_bus_init(void)
706 nvdimm_major = rc; 699 nvdimm_major = rc;
707 700
708 nd_class = class_create(THIS_MODULE, "nd"); 701 nd_class = class_create(THIS_MODULE, "nd");
709 if (IS_ERR(nd_class)) 702 if (IS_ERR(nd_class)) {
703 rc = PTR_ERR(nd_class);
710 goto err_class; 704 goto err_class;
705 }
711 706
712 return 0; 707 return 0;
713 708
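The bus.c hunk makes the class_create() failure path return the real error instead of whatever rc happened to hold. The usual shape, sketched with placeholder names:

#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>

static struct class *example_class;

static int __init example_init(void)
{
        int rc = 0;

        example_class = class_create(THIS_MODULE, "example");
        if (IS_ERR(example_class)) {
                /* Propagate the reason for the failure. */
                rc = PTR_ERR(example_class);
                example_class = NULL;
        }

        return rc;
}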
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..7384455792bf 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
458 nvdimm_bus_unlock(dev); 458 nvdimm_bus_unlock(dev);
459 } 459 }
460 if (is_nd_btt(dev) && probe) { 460 if (is_nd_btt(dev) && probe) {
461 struct nd_btt *nd_btt = to_nd_btt(dev);
462
461 nd_region = to_nd_region(dev->parent); 463 nd_region = to_nd_region(dev->parent);
462 nvdimm_bus_lock(dev); 464 nvdimm_bus_lock(dev);
463 if (nd_region->btt_seed == dev) 465 if (nd_region->btt_seed == dev)
464 nd_region_create_btt_seed(nd_region); 466 nd_region_create_btt_seed(nd_region);
467 if (nd_region->ns_seed == &nd_btt->ndns->dev &&
468 is_nd_blk(dev->parent))
469 nd_region_create_blk_seed(nd_region);
465 nvdimm_bus_unlock(dev); 470 nvdimm_bus_unlock(dev);
466 } 471 }
467} 472}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
47 47
48config OF_ADDRESS 48config OF_ADDRESS
49 def_bool y 49 def_bool y
50 depends on !SPARC 50 depends on !SPARC && HAS_IOMEM
51 select OF_ADDRESS_PCI if PCI 51 select OF_ADDRESS_PCI if PCI
52 52
53config OF_ADDRESS_PCI 53config OF_ADDRESS_PCI
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
979 .remove = unittest_remove, 979 .remove = unittest_remove,
980 .driver = { 980 .driver = {
981 .name = "unittest", 981 .name = "unittest",
982 .owner = THIS_MODULE,
983 .of_match_table = of_match_ptr(unittest_match), 982 .of_match_table = of_match_ptr(unittest_match),
984 }, 983 },
985}; 984};
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
1666static struct i2c_driver unittest_i2c_dev_driver = { 1665static struct i2c_driver unittest_i2c_dev_driver = {
1667 .driver = { 1666 .driver = {
1668 .name = "unittest-i2c-dev", 1667 .name = "unittest-i2c-dev",
1669 .owner = THIS_MODULE,
1670 }, 1668 },
1671 .probe = unittest_i2c_dev_probe, 1669 .probe = unittest_i2c_dev_probe,
1672 .remove = unittest_i2c_dev_remove, 1670 .remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
1761static struct i2c_driver unittest_i2c_mux_driver = { 1759static struct i2c_driver unittest_i2c_mux_driver = {
1762 .driver = { 1760 .driver = {
1763 .name = "unittest-i2c-mux", 1761 .name = "unittest-i2c-mux",
1764 .owner = THIS_MODULE,
1765 }, 1762 },
1766 .probe = unittest_i2c_mux_probe, 1763 .probe = unittest_i2c_mux_probe,
1767 .remove = unittest_i2c_mux_remove, 1764 .remove = unittest_i2c_mux_remove,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
891 par_dev->dev.release = free_pardevice; 891 par_dev->dev.release = free_pardevice;
892 par_dev->devmodel = true; 892 par_dev->devmodel = true;
893 ret = device_register(&par_dev->dev); 893 ret = device_register(&par_dev->dev);
894 if (ret) 894 if (ret) {
895 goto err_put_dev; 895 put_device(&par_dev->dev);
896 goto err_put_port;
897 }
896 898
897 /* Chain this onto the list */ 899 /* Chain this onto the list */
898 par_dev->prev = NULL; 900 par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
907 spin_unlock(&port->physport->pardevice_lock); 909 spin_unlock(&port->physport->pardevice_lock);
908 pr_debug("%s: cannot grant exclusive access for device %s\n", 910 pr_debug("%s: cannot grant exclusive access for device %s\n",
909 port->name, name); 911 port->name, name);
910 goto err_put_dev; 912 device_unregister(&par_dev->dev);
913 goto err_put_port;
911 } 914 }
912 port->flags |= PARPORT_FLAG_EXCL; 915 port->flags |= PARPORT_FLAG_EXCL;
913 } 916 }
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
938 941
939 return par_dev; 942 return par_dev;
940 943
941err_put_dev:
942 put_device(&par_dev->dev);
943err_free_devname: 944err_free_devname:
944 kfree(devname); 945 kfree(devname);
945err_free_par_dev: 946err_free_par_dev:
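The parport fix follows the driver-core rule that once device_register() has been called the embedded struct device is refcounted: a failed register is undone with put_device() (the release hook then frees the object), a registered device is torn down with device_unregister(), never a bare kfree(). A minimal sketch (the example type and release hook are illustrative):

#include <linux/device.h>
#include <linux/slab.h>

struct example_dev {
        struct device dev;
};

static void example_release(struct device *dev)
{
        kfree(container_of(dev, struct example_dev, dev));
}

static struct example_dev *example_create(struct device *parent)
{
        struct example_dev *ed;

        ed = kzalloc(sizeof(*ed), GFP_KERNEL);
        if (!ed)
                return NULL;

        ed->dev.parent = parent;
        ed->dev.release = example_release;
        dev_set_name(&ed->dev, "example0");

        if (device_register(&ed->dev)) {
                /* Registration failed: drop the ref, release() frees. */
                put_device(&ed->dev);
                return NULL;
        }
        return ed;
}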
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4efcbe6e..944f50015ed0 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
2# PCI configuration 2# PCI configuration
3# 3#
4config PCI_BUS_ADDR_T_64BIT 4config PCI_BUS_ADDR_T_64BIT
5 def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) 5 def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
6 depends on PCI 6 depends on PCI
7 7
8config PCI_MSI 8config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636681b6..b978bbfe044c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
997 else if (type == PCI_EXP_TYPE_UPSTREAM || 997 else if (type == PCI_EXP_TYPE_UPSTREAM ||
998 type == PCI_EXP_TYPE_DOWNSTREAM) { 998 type == PCI_EXP_TYPE_DOWNSTREAM) {
999 parent = pci_upstream_bridge(pdev); 999 parent = pci_upstream_bridge(pdev);
1000 if (!parent->has_secondary_link) 1000
1001 /*
1002 * Usually there's an upstream device (Root Port or Switch
1003 * Downstream Port), but we can't assume one exists.
1004 */
1005 if (parent && !parent->has_secondary_link)
1001 pdev->has_secondary_link = 1; 1006 pdev->has_secondary_link = 1;
1002 } 1007 }
1003} 1008}
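The probe.c hunk guards against pci_upstream_bridge() returning NULL, which happens when a device has no upstream port at all. A one-function sketch of the safe form, using the same field the patch touches:

#include <linux/pci.h>

/* Sketch: only inspect the upstream port if one actually exists. */
static bool example_upstream_lacks_secondary_link(struct pci_dev *pdev)
{
        struct pci_dev *parent = pci_upstream_bridge(pdev);

        return parent && !parent->has_secondary_link;
}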
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..6b8dd162f644 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
56 56
57config PHY_PXA_28NM_HSIC 57config PHY_PXA_28NM_HSIC
58 tristate "Marvell USB HSIC 28nm PHY Driver" 58 tristate "Marvell USB HSIC 28nm PHY Driver"
59 depends on HAS_IOMEM
59 select GENERIC_PHY 60 select GENERIC_PHY
60 help 61 help
61 Enable this to support Marvell USB HSIC PHY driver for Marvell 62 Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
66 67
67config PHY_PXA_28NM_USB2 68config PHY_PXA_28NM_USB2
68 tristate "Marvell USB 2.0 28nm PHY Driver" 69 tristate "Marvell USB 2.0 28nm PHY Driver"
70 depends on HAS_IOMEM
69 select GENERIC_PHY 71 select GENERIC_PHY
70 help 72 help
71 Enable this to support Marvell USB 2.0 PHY driver for Marvell 73 Enable this to support Marvell USB 2.0 PHY driver for Marvell
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..335e06d66ed9 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
105 105
106static const u32 phy_berlin_pll_dividers[] = { 106static const u32 phy_berlin_pll_dividers[] = {
107 /* Berlin 2 */ 107 /* Berlin 2 */
108 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
109 /* Berlin 2CD */
110 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), 108 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
109 /* Berlin 2CD/Q */
110 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
111}; 111};
112 112
113struct phy_berlin_usb_priv { 113struct phy_berlin_usb_priv {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
212 212
213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); 213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
214} 214}
215EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
215 216
216static struct phy_ops sun4i_usb_phy_ops = { 217static struct phy_ops sun4i_usb_phy_ops = {
217 .init = sun4i_usb_phy_init, 218 .init = sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/phy/omap_control_phy.h> 29#include <linux/phy/omap_control_phy.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/spinlock.h> 31#include <linux/mfd/syscon.h>
32#include <linux/regmap.h>
32 33
33#define PLL_STATUS 0x00000004 34#define PLL_STATUS 0x00000004
34#define PLL_GO 0x00000008 35#define PLL_GO 0x00000008
@@ -53,6 +54,8 @@
53#define PLL_LOCK 0x2 54#define PLL_LOCK 0x2
54#define PLL_IDLE 0x1 55#define PLL_IDLE 0x1
55 56
57#define SATA_PLL_SOFT_RESET BIT(18)
58
56/* 59/*
57 * This is an Empirical value that works, need to confirm the actual 60 * This is an Empirical value that works, need to confirm the actual
58 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status 61 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -83,10 +86,9 @@ struct ti_pipe3 {
83 struct clk *refclk; 86 struct clk *refclk;
84 struct clk *div_clk; 87 struct clk *div_clk;
85 struct pipe3_dpll_map *dpll_map; 88 struct pipe3_dpll_map *dpll_map;
86 bool enabled; 89 struct regmap *dpll_reset_syscon; /* ctrl. reg. access */
87 spinlock_t lock; /* serialize clock enable/disable */ 90 unsigned int dpll_reset_reg; /* reg. index within syscon */
88 /* the below flag is needed specifically for SATA */ 91 bool sata_refclk_enabled;
89 bool refclk_enabled;
90}; 92};
91 93
92static struct pipe3_dpll_map dpll_map_usb[] = { 94static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +139,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
137 return NULL; 139 return NULL;
138} 140}
139 141
142static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
143static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
144
140static int ti_pipe3_power_off(struct phy *x) 145static int ti_pipe3_power_off(struct phy *x)
141{ 146{
142 struct ti_pipe3 *phy = phy_get_drvdata(x); 147 struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +222,7 @@ static int ti_pipe3_init(struct phy *x)
217 u32 val; 222 u32 val;
218 int ret = 0; 223 int ret = 0;
219 224
225 ti_pipe3_enable_clocks(phy);
220 /* 226 /*
221 * Set pcie_pcs register to 0x96 for proper functioning of phy 227 * Set pcie_pcs register to 0x96 for proper functioning of phy
222 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table 228 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +256,46 @@ static int ti_pipe3_exit(struct phy *x)
250 u32 val; 256 u32 val;
251 unsigned long timeout; 257 unsigned long timeout;
252 258
253 /* SATA DPLL can't be powered down due to Errata i783 and PCIe 259 /* If dpll_reset_syscon is not present we won't power down SATA DPLL
254 * does not have internal DPLL 260 * due to Errata i783
255 */ 261 */
256 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || 262 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
257 of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) 263 !phy->dpll_reset_syscon)
258 return 0; 264 return 0;
259 265
260 /* Put DPLL in IDLE mode */ 266 /* PCIe doesn't have internal DPLL */
261 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); 267 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
262 val |= PLL_IDLE; 268 /* Put DPLL in IDLE mode */
263 ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); 269 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
270 val |= PLL_IDLE;
271 ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
264 272
265 /* wait for LDO and Oscillator to power down */ 273 /* wait for LDO and Oscillator to power down */
266 timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); 274 timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
267 do { 275 do {
268 cpu_relax(); 276 cpu_relax();
269 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 277 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
270 if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) 278 if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
271 break; 279 break;
272 } while (!time_after(jiffies, timeout)); 280 } while (!time_after(jiffies, timeout));
281
282 if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
283 dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
284 val);
285 return -EBUSY;
286 }
287 }
273 288
274 if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { 289 /* i783: SATA needs control bit toggle after PLL unlock */
275 dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", 290 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
276 val); 291 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
277 return -EBUSY; 292 SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
293 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
294 SATA_PLL_SOFT_RESET, 0);
278 } 295 }
279 296
297 ti_pipe3_disable_clocks(phy);
298
280 return 0; 299 return 0;
281} 300}
282static struct phy_ops ops = { 301static struct phy_ops ops = {
@@ -306,7 +325,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
306 return -ENOMEM; 325 return -ENOMEM;
307 326
308 phy->dev = &pdev->dev; 327 phy->dev = &pdev->dev;
309 spin_lock_init(&phy->lock);
310 328
311 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 329 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
312 match = of_match_device(ti_pipe3_id_table, &pdev->dev); 330 match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
350 } 368 }
351 } else { 369 } else {
352 phy->wkupclk = ERR_PTR(-ENODEV); 370 phy->wkupclk = ERR_PTR(-ENODEV);
371 phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
372 "syscon-pllreset");
373 if (IS_ERR(phy->dpll_reset_syscon)) {
374 dev_info(&pdev->dev,
375 "can't get syscon-pllreset, sata dpll won't idle\n");
376 phy->dpll_reset_syscon = NULL;
377 } else {
378 if (of_property_read_u32_index(node,
379 "syscon-pllreset", 1,
380 &phy->dpll_reset_reg)) {
381 dev_err(&pdev->dev,
382 "couldn't get pllreset reg. offset\n");
383 return -EINVAL;
384 }
385 }
353 } 386 }
354 387
355 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 388 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -403,6 +436,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
403 platform_set_drvdata(pdev, phy); 436 platform_set_drvdata(pdev, phy);
404 pm_runtime_enable(phy->dev); 437 pm_runtime_enable(phy->dev);
405 438
439 /*
440 * Prevent auto-disable of refclk for SATA PHY due to Errata i783
441 */
442 if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
443 if (!IS_ERR(phy->refclk)) {
444 clk_prepare_enable(phy->refclk);
445 phy->sata_refclk_enabled = true;
446 }
447 }
448
406 generic_phy = devm_phy_create(phy->dev, NULL, &ops); 449 generic_phy = devm_phy_create(phy->dev, NULL, &ops);
407 if (IS_ERR(generic_phy)) 450 if (IS_ERR(generic_phy))
408 return PTR_ERR(generic_phy); 451 return PTR_ERR(generic_phy);
@@ -413,63 +456,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
413 if (IS_ERR(phy_provider)) 456 if (IS_ERR(phy_provider))
414 return PTR_ERR(phy_provider); 457 return PTR_ERR(phy_provider);
415 458
416 pm_runtime_get(&pdev->dev);
417
418 return 0; 459 return 0;
419} 460}
420 461
421static int ti_pipe3_remove(struct platform_device *pdev) 462static int ti_pipe3_remove(struct platform_device *pdev)
422{ 463{
423 if (!pm_runtime_suspended(&pdev->dev))
424 pm_runtime_put(&pdev->dev);
425 pm_runtime_disable(&pdev->dev); 464 pm_runtime_disable(&pdev->dev);
426 465
427 return 0; 466 return 0;
428} 467}
429 468
430#ifdef CONFIG_PM 469static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
431static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
432{ 470{
433 if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { 471 int ret = 0;
434 int ret;
435 472
473 if (!IS_ERR(phy->refclk)) {
436 ret = clk_prepare_enable(phy->refclk); 474 ret = clk_prepare_enable(phy->refclk);
437 if (ret) { 475 if (ret) {
438 dev_err(phy->dev, "Failed to enable refclk %d\n", ret); 476 dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
439 return ret; 477 return ret;
440 } 478 }
441 phy->refclk_enabled = true;
442 } 479 }
443 480
444 return 0;
445}
446
447static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
448{
449 if (!IS_ERR(phy->refclk))
450 clk_disable_unprepare(phy->refclk);
451
452 phy->refclk_enabled = false;
453}
454
455static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
456{
457 int ret = 0;
458 unsigned long flags;
459
460 spin_lock_irqsave(&phy->lock, flags);
461 if (phy->enabled)
462 goto err1;
463
464 ret = ti_pipe3_enable_refclk(phy);
465 if (ret)
466 goto err1;
467
468 if (!IS_ERR(phy->wkupclk)) { 481 if (!IS_ERR(phy->wkupclk)) {
469 ret = clk_prepare_enable(phy->wkupclk); 482 ret = clk_prepare_enable(phy->wkupclk);
470 if (ret) { 483 if (ret) {
471 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); 484 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
472 goto err2; 485 goto disable_refclk;
473 } 486 }
474 } 487 }
475 488
@@ -477,96 +490,43 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
477 ret = clk_prepare_enable(phy->div_clk); 490 ret = clk_prepare_enable(phy->div_clk);
478 if (ret) { 491 if (ret) {
479 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); 492 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
480 goto err3; 493 goto disable_wkupclk;
481 } 494 }
482 } 495 }
483 496
484 phy->enabled = true;
485 spin_unlock_irqrestore(&phy->lock, flags);
486 return 0; 497 return 0;
487 498
488err3: 499disable_wkupclk:
489 if (!IS_ERR(phy->wkupclk)) 500 if (!IS_ERR(phy->wkupclk))
490 clk_disable_unprepare(phy->wkupclk); 501 clk_disable_unprepare(phy->wkupclk);
491 502
492err2: 503disable_refclk:
493 if (!IS_ERR(phy->refclk)) 504 if (!IS_ERR(phy->refclk))
494 clk_disable_unprepare(phy->refclk); 505 clk_disable_unprepare(phy->refclk);
495 506
496 ti_pipe3_disable_refclk(phy);
497err1:
498 spin_unlock_irqrestore(&phy->lock, flags);
499 return ret; 507 return ret;
500} 508}
501 509
502static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) 510static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
503{ 511{
504 unsigned long flags;
505
506 spin_lock_irqsave(&phy->lock, flags);
507 if (!phy->enabled) {
508 spin_unlock_irqrestore(&phy->lock, flags);
509 return;
510 }
511
512 if (!IS_ERR(phy->wkupclk)) 512 if (!IS_ERR(phy->wkupclk))
513 clk_disable_unprepare(phy->wkupclk); 513 clk_disable_unprepare(phy->wkupclk);
514 /* Don't disable refclk for SATA PHY due to Errata i783 */ 514 if (!IS_ERR(phy->refclk)) {
515 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 515 clk_disable_unprepare(phy->refclk);
516 ti_pipe3_disable_refclk(phy); 516 /*
517 * SATA refclk needs an additional disable as we left it
518 * on in probe to avoid Errata i783
519 */
520 if (phy->sata_refclk_enabled) {
521 clk_disable_unprepare(phy->refclk);
522 phy->sata_refclk_enabled = false;
523 }
524 }
525
517 if (!IS_ERR(phy->div_clk)) 526 if (!IS_ERR(phy->div_clk))
518 clk_disable_unprepare(phy->div_clk); 527 clk_disable_unprepare(phy->div_clk);
519 phy->enabled = false;
520 spin_unlock_irqrestore(&phy->lock, flags);
521} 528}
522 529
523static int ti_pipe3_runtime_suspend(struct device *dev)
524{
525 struct ti_pipe3 *phy = dev_get_drvdata(dev);
526
527 ti_pipe3_disable_clocks(phy);
528 return 0;
529}
530
531static int ti_pipe3_runtime_resume(struct device *dev)
532{
533 struct ti_pipe3 *phy = dev_get_drvdata(dev);
534 int ret = 0;
535
536 ret = ti_pipe3_enable_clocks(phy);
537 return ret;
538}
539
540static int ti_pipe3_suspend(struct device *dev)
541{
542 struct ti_pipe3 *phy = dev_get_drvdata(dev);
543
544 ti_pipe3_disable_clocks(phy);
545 return 0;
546}
547
548static int ti_pipe3_resume(struct device *dev)
549{
550 struct ti_pipe3 *phy = dev_get_drvdata(dev);
551 int ret;
552
553 ret = ti_pipe3_enable_clocks(phy);
554 if (ret)
555 return ret;
556
557 pm_runtime_disable(dev);
558 pm_runtime_set_active(dev);
559 pm_runtime_enable(dev);
560 return 0;
561}
562#endif
563
564static const struct dev_pm_ops ti_pipe3_pm_ops = {
565 SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
566 ti_pipe3_runtime_resume, NULL)
567 SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
568};
569
570static const struct of_device_id ti_pipe3_id_table[] = { 530static const struct of_device_id ti_pipe3_id_table[] = {
571 { 531 {
572 .compatible = "ti,phy-usb3", 532 .compatible = "ti,phy-usb3",
@@ -592,7 +552,6 @@ static struct platform_driver ti_pipe3_driver = {
592 .remove = ti_pipe3_remove, 552 .remove = ti_pipe3_remove,
593 .driver = { 553 .driver = {
594 .name = "ti-pipe3", 554 .name = "ti-pipe3",
595 .pm = &ti_pipe3_pm_ops,
596 .of_match_table = ti_pipe3_id_table, 555 .of_match_table = ti_pipe3_id_table,
597 }, 556 },
598}; 557};
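
The ti-pipe3 hunks above rename the numbered error labels (err1/err2/err3) to labels that describe the cleanup they perform and drop the enabled-flag/spinlock bookkeeping around clock handling. A minimal sketch of that unwind style, assuming illustrative names (pipe3_clks_enable, struct my_phy) rather than the driver's own:

#include <linux/clk.h>
#include <linux/err.h>

struct my_phy {
	struct clk *refclk;
	struct clk *wkupclk;
	struct clk *div_clk;
};

/* Enable clocks in order; each label undoes exactly what succeeded before it. */
static int pipe3_clks_enable(struct my_phy *phy)
{
	int ret;

	ret = clk_prepare_enable(phy->refclk);
	if (ret)
		return ret;

	if (!IS_ERR(phy->wkupclk)) {
		ret = clk_prepare_enable(phy->wkupclk);
		if (ret)
			goto disable_refclk;
	}

	if (!IS_ERR(phy->div_clk)) {
		ret = clk_prepare_enable(phy->div_clk);
		if (ret)
			goto disable_wkupclk;
	}

	return 0;

disable_wkupclk:
	if (!IS_ERR(phy->wkupclk))
		clk_disable_unprepare(phy->wkupclk);
disable_refclk:
	clk_disable_unprepare(phy->refclk);
	return ret;
}

Each label undoes only what was successfully enabled before the failing step, so the function can be read top to bottom against its unwind path.
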
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..6177315ab74e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
473 473
474 spin_lock_irqsave(&pc->irq_lock[bank], flags); 474 spin_lock_irqsave(&pc->irq_lock[bank], flags);
475 bcm2835_gpio_irq_config(pc, gpio, false); 475 bcm2835_gpio_irq_config(pc, gpio, false);
476 /* Clear events that were latched prior to clearing event sources */
477 bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
476 clear_bit(offset, &pc->enabled_irq_map[bank]); 478 clear_bit(offset, &pc->enabled_irq_map[bank]);
477 spin_unlock_irqrestore(&pc->irq_lock[bank], flags); 479 spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
478} 480}
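
The bcm2835 change acknowledges any event that latched in GPEDS0 before the event source was cleared, so a stale edge does not fire when the IRQ is later re-enabled. A rough sketch of the same mask-then-acknowledge sequence against a hypothetical register pair (EVENT_ENABLE and EVENT_STATUS are assumptions, not the driver's registers):

#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#define EVENT_ENABLE	0x00	/* hypothetical enable register */
#define EVENT_STATUS	0x04	/* hypothetical latched-status register */

/* Mask an event source, then acknowledge anything that latched while it was live. */
static void event_irq_disable(void __iomem *base, unsigned int bit, spinlock_t *lock)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	val = readl(base + EVENT_ENABLE);
	writel(val & ~BIT(bit), base + EVENT_ENABLE);
	/* Status registers of this kind are typically write-1-to-clear. */
	writel(BIT(bit), base + EVENT_STATUS);
	spin_unlock_irqrestore(lock, flags);
}
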
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
403 unsigned num_configs) 403 unsigned num_configs)
404{ 404{
405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); 405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
406 const struct imx1_pinctrl_soc_info *info = ipctl->info;
407 int i; 406 int i;
408 407
409 for (i = 0; i != num_configs; ++i) { 408 for (i = 0; i != num_configs; ++i) {
410 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); 409 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
411 410
412 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", 411 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
413 info->pins[pin_id].name); 412 pin_desc_get(pctldev, pin_id)->name);
414 } 413 }
415 414
416 return 0; 415 return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
787 .set_mux = abx500_pmx_set, 787 .set_mux = abx500_pmx_set,
788 .gpio_request_enable = abx500_gpio_request_enable, 788 .gpio_request_enable = abx500_gpio_request_enable,
789 .gpio_disable_free = abx500_gpio_disable_free, 789 .gpio_disable_free = abx500_gpio_disable_free,
790 .strict = true,
791}; 790};
792 791
793static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) 792static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..347c763a6a78 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
823 break; 823 break;
824 824
825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
826 if (param) 826 if (param_val)
827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); 827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
828 else 828 else
829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); 829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
876 break; 876 break;
877 877
878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
879 if (param) 879 if (param_val)
880 *reg &= ~LPC18XX_SCU_PIN_ZIF; 880 *reg &= ~LPC18XX_SCU_PIN_ZIF;
881 else 881 else
882 *reg |= LPC18XX_SCU_PIN_ZIF; 882 *reg |= LPC18XX_SCU_PIN_ZIF;
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..0b8d480171a3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
1760 int res; 1760 int res;
1761 1761
1762 res = request_irq(pcs_soc->irq, pcs_irq_handler, 1762 res = request_irq(pcs_soc->irq, pcs_irq_handler,
1763 IRQF_SHARED | IRQF_NO_SUSPEND, 1763 IRQF_SHARED | IRQF_NO_SUSPEND |
1764 IRQF_NO_THREAD,
1764 name, pcs_soc); 1765 name, pcs_soc);
1765 if (res) { 1766 if (res) {
1766 pcs_soc->irq = -1; 1767 pcs_soc->irq = -1;
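
The pinctrl-single hunk adds IRQF_NO_THREAD to the chained wakeup interrupt so the handler is never force-threaded. A small sketch of requesting an interrupt with that flag combination; demux_handler and the "pinctrl-demux" name are placeholders:

#include <linux/interrupt.h>

static irqreturn_t demux_handler(int irq, void *data)
{
	/* Demultiplexes wakeup events; must run in hard-IRQ context. */
	return IRQ_HANDLED;
}

static int demux_irq_init(unsigned int irq, void *data)
{
	/*
	 * IRQF_NO_THREAD keeps the handler in hard-IRQ context even when the
	 * kernel is booted with threadirqs, which a chained demultiplexing
	 * handler requires; IRQF_SHARED needs a non-NULL dev_id (data here).
	 */
	return request_irq(irq, demux_handler,
			   IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_NO_THREAD,
			   "pinctrl-demux", data);
}
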
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
33#include "../core.h" 33#include "../core.h"
34#include "pinctrl-samsung.h" 34#include "pinctrl-samsung.h"
35 35
36#define GROUP_SUFFIX "-grp"
37#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
38#define FUNCTION_SUFFIX "-mux"
39#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
40
41/* list of all possible config options supported */ 36/* list of all possible config options supported */
42static struct pin_config { 37static struct pin_config {
43 const char *property; 38 const char *property;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..0874cfee6889 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
224 224
225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ 225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
226#define _GP_GPIO(bank, _pin, _name, sfx) \ 226#define _GP_GPIO(bank, _pin, _name, sfx) \
227 [(bank * 32) + _pin] = { \ 227 { \
228 .pin = (bank * 32) + _pin, \ 228 .pin = (bank * 32) + _pin, \
229 .name = __stringify(_name), \ 229 .name = __stringify(_name), \
230 .enum_id = _name##_DATA, \ 230 .enum_id = _name##_DATA, \
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index f87a5eaf75da..0afaf79a4e51 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr pinmux 2 * Driver for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * Inspired from: 7 * Inspired from:
8 * - U300 Pinctl drivers 8 * - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index dc8bf85ecb2a..27c2cc8d83ad 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
2 * Driver header file for the ST Microelectronics SPEAr pinmux 2 * Driver header file for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index a7bdc537efa7..92611bb757ac 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1310 pinmux 2 * Driver for the ST Microelectronics SPEAr1310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void)
2730} 2730}
2731module_exit(spear1310_pinctrl_exit); 2731module_exit(spear1310_pinctrl_exit);
2732 2732
2733MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 2733MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
2734MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); 2734MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
2735MODULE_LICENSE("GPL v2"); 2735MODULE_LICENSE("GPL v2");
2736MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); 2736MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index f43ec85a0328..f842e9dc40d0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1340 pinmux 2 * Driver for the ST Microelectronics SPEAr1340 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void)
2046} 2046}
2047module_exit(spear1340_pinctrl_exit); 2047module_exit(spear1340_pinctrl_exit);
2048 2048
2049MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 2049MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
2050MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); 2050MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
2051MODULE_LICENSE("GPL v2"); 2051MODULE_LICENSE("GPL v2");
2052MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); 2052MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index da8990a8eeef..d998a2ccff48 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr300 pinmux 2 * Driver for the ST Microelectronics SPEAr300 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void)
703} 703}
704module_exit(spear300_pinctrl_exit); 704module_exit(spear300_pinctrl_exit);
705 705
706MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 706MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
707MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); 707MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
708MODULE_LICENSE("GPL v2"); 708MODULE_LICENSE("GPL v2");
709MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); 709MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 31ede51e819b..609b18aceb16 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr310 pinmux 2 * Driver for the ST Microelectronics SPEAr310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void)
426} 426}
427module_exit(spear310_pinctrl_exit); 427module_exit(spear310_pinctrl_exit);
428 428
429MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 429MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
430MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); 430MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
431MODULE_LICENSE("GPL v2"); 431MODULE_LICENSE("GPL v2");
432MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match); 432MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index 506e40b641e0..c07114431bd4 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr320 pinmux 2 * Driver for the ST Microelectronics SPEAr320 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void)
3467} 3467}
3468module_exit(spear320_pinctrl_exit); 3468module_exit(spear320_pinctrl_exit);
3469 3469
3470MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 3470MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
3471MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); 3471MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
3472MODULE_LICENSE("GPL v2"); 3472MODULE_LICENSE("GPL v2");
3473MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); 3473MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 12ee21af766b..d3119aafe709 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr3xx pinmux 2 * Driver for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 7860b36053c4..ce19dcf8f08b 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
2 * Header file for the ST Microelectronics SPEAr3xx pinmux 2 * Header file for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig CHROME_PLATFORMS 5menuconfig CHROME_PLATFORMS
6 bool "Platform support for Chrome hardware" 6 bool "Platform support for Chrome hardware"
7 depends on X86 || ARM
8 ---help--- 7 ---help---
9 Say Y here to get to see options for platform support for 8 Say Y here to get to see options for platform support for
10 various Chromebooks and Chromeboxes. This option alone does 9 various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ed317ccac4a2..aaeeae81e3a9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
309static struct calling_interface_buffer *buffer; 309static struct calling_interface_buffer *buffer;
310static DEFINE_MUTEX(buffer_mutex); 310static DEFINE_MUTEX(buffer_mutex);
311 311
312static int hwswitch_state; 312static void clear_buffer(void)
313{
314 memset(buffer, 0, sizeof(struct calling_interface_buffer));
315}
313 316
314static void get_buffer(void) 317static void get_buffer(void)
315{ 318{
316 mutex_lock(&buffer_mutex); 319 mutex_lock(&buffer_mutex);
317 memset(buffer, 0, sizeof(struct calling_interface_buffer)); 320 clear_buffer();
318} 321}
319 322
320static void release_buffer(void) 323static void release_buffer(void)
@@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked)
548 int disable = blocked ? 1 : 0; 551 int disable = blocked ? 1 : 0;
549 unsigned long radio = (unsigned long)data; 552 unsigned long radio = (unsigned long)data;
550 int hwswitch_bit = (unsigned long)data - 1; 553 int hwswitch_bit = (unsigned long)data - 1;
554 int hwswitch;
555 int status;
556 int ret;
551 557
552 get_buffer(); 558 get_buffer();
559
560 dell_send_request(buffer, 17, 11);
561 ret = buffer->output[0];
562 status = buffer->output[1];
563
564 if (ret != 0)
565 goto out;
566
567 clear_buffer();
568
569 buffer->input[0] = 0x2;
553 dell_send_request(buffer, 17, 11); 570 dell_send_request(buffer, 17, 11);
571 ret = buffer->output[0];
572 hwswitch = buffer->output[1];
554 573
555 /* If the hardware switch controls this radio, and the hardware 574 /* If the hardware switch controls this radio, and the hardware
556 switch is disabled, always disable the radio */ 575 switch is disabled, always disable the radio */
557 if ((hwswitch_state & BIT(hwswitch_bit)) && 576 if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) &&
558 !(buffer->output[1] & BIT(16))) 577 (status & BIT(0)) && !(status & BIT(16)))
559 disable = 1; 578 disable = 1;
560 579
580 clear_buffer();
581
561 buffer->input[0] = (1 | (radio<<8) | (disable << 16)); 582 buffer->input[0] = (1 | (radio<<8) | (disable << 16));
562 dell_send_request(buffer, 17, 11); 583 dell_send_request(buffer, 17, 11);
584 ret = buffer->output[0];
563 585
586 out:
564 release_buffer(); 587 release_buffer();
565 return 0; 588 return dell_smi_error(ret);
566} 589}
567 590
568/* Must be called with the buffer held */ 591/* Must be called with the buffer held */
@@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
572 if (status & BIT(0)) { 595 if (status & BIT(0)) {
573 /* Has hw-switch, sync sw_state to BIOS */ 596 /* Has hw-switch, sync sw_state to BIOS */
574 int block = rfkill_blocked(rfkill); 597 int block = rfkill_blocked(rfkill);
598 clear_buffer();
575 buffer->input[0] = (1 | (radio << 8) | (block << 16)); 599 buffer->input[0] = (1 | (radio << 8) | (block << 16));
576 dell_send_request(buffer, 17, 11); 600 dell_send_request(buffer, 17, 11);
577 } else { 601 } else {
@@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
581} 605}
582 606
583static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, 607static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
584 int status) 608 int status, int hwswitch)
585{ 609{
586 if (hwswitch_state & (BIT(radio - 1))) 610 if (hwswitch & (BIT(radio - 1)))
587 rfkill_set_hw_state(rfkill, !(status & BIT(16))); 611 rfkill_set_hw_state(rfkill, !(status & BIT(16)));
588} 612}
589 613
590static void dell_rfkill_query(struct rfkill *rfkill, void *data) 614static void dell_rfkill_query(struct rfkill *rfkill, void *data)
591{ 615{
616 int radio = ((unsigned long)data & 0xF);
617 int hwswitch;
592 int status; 618 int status;
619 int ret;
593 620
594 get_buffer(); 621 get_buffer();
622
595 dell_send_request(buffer, 17, 11); 623 dell_send_request(buffer, 17, 11);
624 ret = buffer->output[0];
596 status = buffer->output[1]; 625 status = buffer->output[1];
597 626
598 dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status); 627 if (ret != 0 || !(status & BIT(0))) {
628 release_buffer();
629 return;
630 }
631
632 clear_buffer();
633
634 buffer->input[0] = 0x2;
635 dell_send_request(buffer, 17, 11);
636 ret = buffer->output[0];
637 hwswitch = buffer->output[1];
599 638
600 release_buffer(); 639 release_buffer();
640
641 if (ret != 0)
642 return;
643
644 dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
601} 645}
602 646
603static const struct rfkill_ops dell_rfkill_ops = { 647static const struct rfkill_ops dell_rfkill_ops = {
@@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir;
609 653
610static int dell_debugfs_show(struct seq_file *s, void *data) 654static int dell_debugfs_show(struct seq_file *s, void *data)
611{ 655{
656 int hwswitch_state;
657 int hwswitch_ret;
612 int status; 658 int status;
659 int ret;
613 660
614 get_buffer(); 661 get_buffer();
662
615 dell_send_request(buffer, 17, 11); 663 dell_send_request(buffer, 17, 11);
664 ret = buffer->output[0];
616 status = buffer->output[1]; 665 status = buffer->output[1];
666
667 clear_buffer();
668
669 buffer->input[0] = 0x2;
670 dell_send_request(buffer, 17, 11);
671 hwswitch_ret = buffer->output[0];
672 hwswitch_state = buffer->output[1];
673
617 release_buffer(); 674 release_buffer();
618 675
676 seq_printf(s, "return:\t%d\n", ret);
619 seq_printf(s, "status:\t0x%X\n", status); 677 seq_printf(s, "status:\t0x%X\n", status);
620 seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", 678 seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n",
621 status & BIT(0)); 679 status & BIT(0));
@@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
657 seq_printf(s, "Bit 21: WiGig is blocked: %lu\n", 715 seq_printf(s, "Bit 21: WiGig is blocked: %lu\n",
658 (status & BIT(21)) >> 21); 716 (status & BIT(21)) >> 21);
659 717
660 seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state); 718 seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
719 seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
661 seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n", 720 seq_printf(s, "Bit 0 : Wifi controlled by switch: %lu\n",
662 hwswitch_state & BIT(0)); 721 hwswitch_state & BIT(0));
663 seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n", 722 seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
@@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = {
693 752
694static void dell_update_rfkill(struct work_struct *ignored) 753static void dell_update_rfkill(struct work_struct *ignored)
695{ 754{
755 int hwswitch = 0;
696 int status; 756 int status;
757 int ret;
697 758
698 get_buffer(); 759 get_buffer();
760
699 dell_send_request(buffer, 17, 11); 761 dell_send_request(buffer, 17, 11);
762 ret = buffer->output[0];
700 status = buffer->output[1]; 763 status = buffer->output[1];
701 764
765 if (ret != 0)
766 goto out;
767
768 clear_buffer();
769
770 buffer->input[0] = 0x2;
771 dell_send_request(buffer, 17, 11);
772 ret = buffer->output[0];
773
774 if (ret == 0 && (status & BIT(0)))
775 hwswitch = buffer->output[1];
776
702 if (wifi_rfkill) { 777 if (wifi_rfkill) {
703 dell_rfkill_update_hw_state(wifi_rfkill, 1, status); 778 dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
704 dell_rfkill_update_sw_state(wifi_rfkill, 1, status); 779 dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
705 } 780 }
706 if (bluetooth_rfkill) { 781 if (bluetooth_rfkill) {
707 dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status); 782 dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
783 hwswitch);
708 dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); 784 dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
709 } 785 }
710 if (wwan_rfkill) { 786 if (wwan_rfkill) {
711 dell_rfkill_update_hw_state(wwan_rfkill, 3, status); 787 dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
712 dell_rfkill_update_sw_state(wwan_rfkill, 3, status); 788 dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
713 } 789 }
714 790
791 out:
715 release_buffer(); 792 release_buffer();
716} 793}
717static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); 794static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void)
773 850
774 get_buffer(); 851 get_buffer();
775 dell_send_request(buffer, 17, 11); 852 dell_send_request(buffer, 17, 11);
853 ret = buffer->output[0];
776 status = buffer->output[1]; 854 status = buffer->output[1];
777 buffer->input[0] = 0x2;
778 dell_send_request(buffer, 17, 11);
779 hwswitch_state = buffer->output[1];
780 release_buffer(); 855 release_buffer();
781 856
782 if (!(status & BIT(0))) { 857 /* dell wireless info smbios call is not supported */
783 if (force_rfkill) { 858 if (ret != 0)
784 /* No hwsitch, clear all hw-controlled bits */ 859 return 0;
785 hwswitch_state &= ~7; 860
786 } else { 861 /* rfkill is only tested on laptops with a hwswitch */
787 /* rfkill is only tested on laptops with a hwswitch */ 862 if (!(status & BIT(0)) && !force_rfkill)
788 return 0; 863 return 0;
789 }
790 }
791 864
792 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { 865 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
793 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev, 866 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
@@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void)
932 1005
933static int dell_send_intensity(struct backlight_device *bd) 1006static int dell_send_intensity(struct backlight_device *bd)
934{ 1007{
935 int ret = 0; 1008 int token;
1009 int ret;
1010
1011 token = find_token_location(BRIGHTNESS_TOKEN);
1012 if (token == -1)
1013 return -ENODEV;
936 1014
937 get_buffer(); 1015 get_buffer();
938 buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); 1016 buffer->input[0] = token;
939 buffer->input[1] = bd->props.brightness; 1017 buffer->input[1] = bd->props.brightness;
940 1018
941 if (buffer->input[0] == -1) {
942 ret = -ENODEV;
943 goto out;
944 }
945
946 if (power_supply_is_system_supplied() > 0) 1019 if (power_supply_is_system_supplied() > 0)
947 dell_send_request(buffer, 1, 2); 1020 dell_send_request(buffer, 1, 2);
948 else 1021 else
949 dell_send_request(buffer, 1, 1); 1022 dell_send_request(buffer, 1, 1);
950 1023
951 out: 1024 ret = dell_smi_error(buffer->output[0]);
1025
952 release_buffer(); 1026 release_buffer();
953 return ret; 1027 return ret;
954} 1028}
955 1029
956static int dell_get_intensity(struct backlight_device *bd) 1030static int dell_get_intensity(struct backlight_device *bd)
957{ 1031{
958 int ret = 0; 1032 int token;
1033 int ret;
959 1034
960 get_buffer(); 1035 token = find_token_location(BRIGHTNESS_TOKEN);
961 buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); 1036 if (token == -1)
1037 return -ENODEV;
962 1038
963 if (buffer->input[0] == -1) { 1039 get_buffer();
964 ret = -ENODEV; 1040 buffer->input[0] = token;
965 goto out;
966 }
967 1041
968 if (power_supply_is_system_supplied() > 0) 1042 if (power_supply_is_system_supplied() > 0)
969 dell_send_request(buffer, 0, 2); 1043 dell_send_request(buffer, 0, 2);
970 else 1044 else
971 dell_send_request(buffer, 0, 1); 1045 dell_send_request(buffer, 0, 1);
972 1046
973 ret = buffer->output[1]; 1047 if (buffer->output[0])
1048 ret = dell_smi_error(buffer->output[0]);
1049 else
1050 ret = buffer->output[1];
974 1051
975 out:
976 release_buffer(); 1052 release_buffer();
977 return ret; 1053 return ret;
978} 1054}
@@ -2036,6 +2112,7 @@ static void kbd_led_exit(void)
2036static int __init dell_init(void) 2112static int __init dell_init(void)
2037{ 2113{
2038 int max_intensity = 0; 2114 int max_intensity = 0;
2115 int token;
2039 int ret; 2116 int ret;
2040 2117
2041 if (!dmi_check_system(dell_device_table)) 2118 if (!dmi_check_system(dell_device_table))
@@ -2094,13 +2171,15 @@ static int __init dell_init(void)
2094 if (acpi_video_get_backlight_type() != acpi_backlight_vendor) 2171 if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
2095 return 0; 2172 return 0;
2096 2173
2097 get_buffer(); 2174 token = find_token_location(BRIGHTNESS_TOKEN);
2098 buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN); 2175 if (token != -1) {
2099 if (buffer->input[0] != -1) { 2176 get_buffer();
2177 buffer->input[0] = token;
2100 dell_send_request(buffer, 0, 2); 2178 dell_send_request(buffer, 0, 2);
2101 max_intensity = buffer->output[3]; 2179 if (buffer->output[0] == 0)
2180 max_intensity = buffer->output[3];
2181 release_buffer();
2102 } 2182 }
2103 release_buffer();
2104 2183
2105 if (max_intensity) { 2184 if (max_intensity) {
2106 struct backlight_properties props; 2185 struct backlight_properties props;
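
The dell-laptop changes stop caching hwswitch_state at init and instead query it on every rfkill operation: one class-17/select-11 call returns the status word, a second call with input[0] set to 0x2 returns the hardware-switch configuration, and each output[0] return code is checked before the data is trusted. A condensed sketch of that pattern using the driver's own helpers shown above (dell_query_hwswitch itself is an illustrative name):

/* Sketch only: relies on the driver's buffer, get_buffer(), clear_buffer(),
 * release_buffer(), dell_send_request() and dell_smi_error() shown above. */
static int dell_query_hwswitch(int *status, int *hwswitch)
{
	int ret;

	get_buffer();

	/* Class 17, select 11: wireless device status word in output[1]. */
	dell_send_request(buffer, 17, 11);
	ret = buffer->output[0];
	*status = buffer->output[1];
	if (ret != 0)
		goto out;

	clear_buffer();

	/* Same call with input[0] = 0x2 returns the hardware switch config. */
	buffer->input[0] = 0x2;
	dell_send_request(buffer, 17, 11);
	ret = buffer->output[0];
	*hwswitch = buffer->output[1];

out:
	release_buffer();
	return dell_smi_error(ret);
}
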
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index d734763dab69..105cfffe82c6 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev {
96 struct completion cmd_complete; 96 struct completion cmd_complete;
97 97
98 /* The following PMC BARs share the same ACPI device with the IPC */ 98 /* The following PMC BARs share the same ACPI device with the IPC */
99 void *acpi_io_base; 99 resource_size_t acpi_io_base;
100 int acpi_io_size; 100 int acpi_io_size;
101 struct platform_device *tco_dev; 101 struct platform_device *tco_dev;
102 102
103 /* gcr */ 103 /* gcr */
104 void *gcr_base; 104 resource_size_t gcr_base;
105 int gcr_size; 105 int gcr_size;
106 106
107 /* punit */ 107 /* punit */
108 void *punit_base; 108 resource_size_t punit_base;
109 int punit_size; 109 int punit_size;
110 void *punit_base2; 110 resource_size_t punit_base2;
111 int punit_size2; 111 int punit_size2;
112 struct platform_device *punit_dev; 112 struct platform_device *punit_dev;
113} ipcdev; 113} ipcdev;
@@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void)
210 return ret; 210 return ret;
211} 211}
212 212
213/* 213/**
214 * intel_pmc_ipc_simple_command 214 * intel_pmc_ipc_simple_command() - Simple IPC command
215 * @cmd: command 215 * @cmd: IPC command code.
216 * @sub: sub type 216 * @sub: IPC command sub type.
217 *
218 * Send a simple IPC command to PMC when there is no need to specify
219 * input/output data and source/dest pointers.
220 *
221 * Return: an IPC error code or 0 on success.
217 */ 222 */
218int intel_pmc_ipc_simple_command(int cmd, int sub) 223int intel_pmc_ipc_simple_command(int cmd, int sub)
219{ 224{
@@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
232} 237}
233EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); 238EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
234 239
235/* 240/**
236 * intel_pmc_ipc_raw_cmd 241 * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
237 * @cmd: command 242 * @cmd: IPC command code.
238 * @sub: sub type 243 * @sub: IPC command sub type.
239 * @in: input data 244 * @in: input data of this IPC command.
240 * @inlen: input length in bytes 245 * @inlen: input data length in bytes.
241 * @out: output data 246 * @out: output data of this IPC command.
242 * @outlen: output length in dwords 247 * @outlen: output data length in dwords.
243 * @sptr: data writing to SPTR register 248 * @sptr: data writing to SPTR register.
244 * @dptr: data writing to DPTR register 249 * @dptr: data writing to DPTR register.
250 *
251 * Send an IPC command to PMC with input/output data and source/dest pointers.
252 *
253 * Return: an IPC error code or 0 on success.
245 */ 254 */
246int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, 255int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
247 u32 outlen, u32 dptr, u32 sptr) 256 u32 outlen, u32 dptr, u32 sptr)
@@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
278} 287}
279EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); 288EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
280 289
281/* 290/**
282 * intel_pmc_ipc_command 291 * intel_pmc_ipc_command() - IPC command with input/output data
283 * @cmd: command 292 * @cmd: IPC command code.
284 * @sub: sub type 293 * @sub: IPC command sub type.
285 * @in: input data 294 * @in: input data of this IPC command.
286 * @inlen: input length in bytes 295 * @inlen: input data length in bytes.
287 * @out: output data 296 * @out: output data of this IPC command.
288 * @outlen: output length in dwords 297 * @outlen: output data length in dwords.
298 *
299 * Send an IPC command to PMC with input/output data.
300 *
301 * Return: an IPC error code or 0 on success.
289 */ 302 */
290int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen, 303int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
291 u32 *out, u32 outlen) 304 u32 *out, u32 outlen)
@@ -480,11 +493,11 @@ static int ipc_create_punit_device(void)
480 pdev->dev.parent = ipcdev.dev; 493 pdev->dev.parent = ipcdev.dev;
481 494
482 res = punit_res; 495 res = punit_res;
483 res->start = (resource_size_t)ipcdev.punit_base; 496 res->start = ipcdev.punit_base;
484 res->end = res->start + ipcdev.punit_size - 1; 497 res->end = res->start + ipcdev.punit_size - 1;
485 498
486 res = punit_res + PUNIT_RESOURCE_INTER; 499 res = punit_res + PUNIT_RESOURCE_INTER;
487 res->start = (resource_size_t)ipcdev.punit_base2; 500 res->start = ipcdev.punit_base2;
488 res->end = res->start + ipcdev.punit_size2 - 1; 501 res->end = res->start + ipcdev.punit_size2 - 1;
489 502
490 ret = platform_device_add_resources(pdev, punit_res, 503 ret = platform_device_add_resources(pdev, punit_res,
@@ -522,15 +535,15 @@ static int ipc_create_tco_device(void)
522 pdev->dev.parent = ipcdev.dev; 535 pdev->dev.parent = ipcdev.dev;
523 536
524 res = tco_res + TCO_RESOURCE_ACPI_IO; 537 res = tco_res + TCO_RESOURCE_ACPI_IO;
525 res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET; 538 res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
526 res->end = res->start + TCO_REGS_SIZE - 1; 539 res->end = res->start + TCO_REGS_SIZE - 1;
527 540
528 res = tco_res + TCO_RESOURCE_SMI_EN_IO; 541 res = tco_res + TCO_RESOURCE_SMI_EN_IO;
529 res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET; 542 res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
530 res->end = res->start + SMI_EN_SIZE - 1; 543 res->end = res->start + SMI_EN_SIZE - 1;
531 544
532 res = tco_res + TCO_RESOURCE_GCR_MEM; 545 res = tco_res + TCO_RESOURCE_GCR_MEM;
533 res->start = (resource_size_t)ipcdev.gcr_base; 546 res->start = ipcdev.gcr_base;
534 res->end = res->start + ipcdev.gcr_size - 1; 547 res->end = res->start + ipcdev.gcr_size - 1;
535 548
536 ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res)); 549 ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
@@ -589,7 +602,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
589 return -ENXIO; 602 return -ENXIO;
590 } 603 }
591 size = resource_size(res); 604 size = resource_size(res);
592 ipcdev.acpi_io_base = (void *)res->start; 605 ipcdev.acpi_io_base = res->start;
593 ipcdev.acpi_io_size = size; 606 ipcdev.acpi_io_size = size;
594 dev_info(&pdev->dev, "io res: %llx %x\n", 607 dev_info(&pdev->dev, "io res: %llx %x\n",
595 (long long)res->start, (int)resource_size(res)); 608 (long long)res->start, (int)resource_size(res));
@@ -601,7 +614,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
601 return -ENXIO; 614 return -ENXIO;
602 } 615 }
603 size = resource_size(res); 616 size = resource_size(res);
604 ipcdev.punit_base = (void *)res->start; 617 ipcdev.punit_base = res->start;
605 ipcdev.punit_size = size; 618 ipcdev.punit_size = size;
606 dev_info(&pdev->dev, "punit data res: %llx %x\n", 619 dev_info(&pdev->dev, "punit data res: %llx %x\n",
607 (long long)res->start, (int)resource_size(res)); 620 (long long)res->start, (int)resource_size(res));
@@ -613,7 +626,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
613 return -ENXIO; 626 return -ENXIO;
614 } 627 }
615 size = resource_size(res); 628 size = resource_size(res);
616 ipcdev.punit_base2 = (void *)res->start; 629 ipcdev.punit_base2 = res->start;
617 ipcdev.punit_size2 = size; 630 ipcdev.punit_size2 = size;
618 dev_info(&pdev->dev, "punit interface res: %llx %x\n", 631 dev_info(&pdev->dev, "punit interface res: %llx %x\n",
619 (long long)res->start, (int)resource_size(res)); 632 (long long)res->start, (int)resource_size(res));
@@ -637,7 +650,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
637 } 650 }
638 ipcdev.ipc_base = addr; 651 ipcdev.ipc_base = addr;
639 652
640 ipcdev.gcr_base = (void *)(res->start + size); 653 ipcdev.gcr_base = res->start + size;
641 ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE; 654 ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
642 dev_info(&pdev->dev, "ipc res: %llx %x\n", 655 dev_info(&pdev->dev, "ipc res: %llx %x\n",
643 (long long)res->start, (int)resource_size(res)); 656 (long long)res->start, (int)resource_size(res));
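
The intel_pmc_ipc change stores the PMC sub-device base addresses as resource_size_t instead of void *, so they can be assigned to struct resource start/end fields without casts. A short sketch of that convention; the struct and function names here are illustrative:

#include <linux/ioport.h>
#include <linux/types.h>

/* Carry raw bus addresses as resource_size_t, not as fake pointers. */
struct child_resources {
	resource_size_t base;
	int size;
};

static void fill_child_res(struct resource *res,
			   const struct child_resources *c)
{
	/* No casts needed: resource_size_t matches res->start and res->end. */
	res->start = c->base;
	res->end = c->base + c->size - 1;
	res->flags = IORESOURCE_MEM;
}
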
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 001b199a8c33..187d1086d15c 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
216 int nc; 216 int nc;
217 u32 offset = 0; 217 u32 offset = 0;
218 int err; 218 int err;
219 u8 cbuf[IPC_WWBUF_SIZE] = { }; 219 u8 cbuf[IPC_WWBUF_SIZE];
220 u32 *wbuf = (u32 *)&cbuf; 220 u32 *wbuf = (u32 *)&cbuf;
221 221
222 mutex_lock(&ipclock);
223
224 memset(cbuf, 0, sizeof(cbuf)); 222 memset(cbuf, 0, sizeof(cbuf));
225 223
224 mutex_lock(&ipclock);
225
226 if (ipcdev.pdev == NULL) { 226 if (ipcdev.pdev == NULL) {
227 mutex_unlock(&ipclock); 227 mutex_unlock(&ipclock);
228 return -ENODEV; 228 return -ENODEV;
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 515f33882ab8..49c1720df59a 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -7,7 +7,6 @@
7 * Bjorn Helgaas <bjorn.helgaas@hp.com> 7 * Bjorn Helgaas <bjorn.helgaas@hp.com>
8 */ 8 */
9 9
10#include <linux/acpi.h>
11#include <linux/pnp.h> 10#include <linux/pnp.h>
12#include <linux/device.h> 11#include <linux/device.h>
13#include <linux/init.h> 12#include <linux/init.h>
@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
23 {"", 0} 22 {"", 0}
24}; 23};
25 24
26#ifdef CONFIG_ACPI
27static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
28{
29 u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
30 return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
31}
32#else
33static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
34{
35 struct resource *res;
36
37 res = io ? request_region(start, length, desc) :
38 request_mem_region(start, length, desc);
39 if (res) {
40 res->flags &= ~IORESOURCE_BUSY;
41 return true;
42 }
43 return false;
44}
45#endif
46
47static void reserve_range(struct pnp_dev *dev, struct resource *r, int port) 25static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
48{ 26{
49 char *regionid; 27 char *regionid;
50 const char *pnpid = dev_name(&dev->dev); 28 const char *pnpid = dev_name(&dev->dev);
51 resource_size_t start = r->start, end = r->end; 29 resource_size_t start = r->start, end = r->end;
52 bool reserved; 30 struct resource *res;
53 31
54 regionid = kmalloc(16, GFP_KERNEL); 32 regionid = kmalloc(16, GFP_KERNEL);
55 if (!regionid) 33 if (!regionid)
56 return; 34 return;
57 35
58 snprintf(regionid, 16, "pnp %s", pnpid); 36 snprintf(regionid, 16, "pnp %s", pnpid);
59 reserved = __reserve_range(start, end - start + 1, !!port, regionid); 37 if (port)
60 if (!reserved) 38 res = request_region(start, end - start + 1, regionid);
39 else
40 res = request_mem_region(start, end - start + 1, regionid);
41 if (res)
42 res->flags &= ~IORESOURCE_BUSY;
43 else
61 kfree(regionid); 44 kfree(regionid);
62 45
63 /* 46 /*
@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
66 * have double reservations. 49 * have double reservations.
67 */ 50 */
68 dev_info(&dev->dev, "%pR %s reserved\n", r, 51 dev_info(&dev->dev, "%pR %s reserved\n", r,
69 reserved ? "has been" : "could not be"); 52 res ? "has been" : "could not be");
70} 53}
71 54
72static void reserve_resources_of_dev(struct pnp_dev *dev) 55static void reserve_resources_of_dev(struct pnp_dev *dev)
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..7fd4f511d78f 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -130,7 +130,7 @@ struct pm800_regulators {
130 .owner = THIS_MODULE, \ 130 .owner = THIS_MODULE, \
131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \ 131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
132 .vsel_reg = PM800_##vreg##_VOUT, \ 132 .vsel_reg = PM800_##vreg##_VOUT, \
133 .vsel_mask = 0x1f, \ 133 .vsel_mask = 0xf, \
134 .enable_reg = PM800_##ereg, \ 134 .enable_reg = PM800_##ereg, \
135 .enable_mask = 1 << (ebit), \ 135 .enable_mask = 1 << (ebit), \
136 .volt_table = ldo_volt_table, \ 136 .volt_table = ldo_volt_table, \
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..78387a6cbae5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
109static struct regulator *create_regulator(struct regulator_dev *rdev, 109static struct regulator *create_regulator(struct regulator_dev *rdev,
110 struct device *dev, 110 struct device *dev,
111 const char *supply_name); 111 const char *supply_name);
112static void _regulator_put(struct regulator *regulator);
112 113
113static const char *rdev_get_name(struct regulator_dev *rdev) 114static const char *rdev_get_name(struct regulator_dev *rdev)
114{ 115{
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev,
1105 1106
1106 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); 1107 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
1107 1108
1109 if (!try_module_get(supply_rdev->owner))
1110 return -ENODEV;
1111
1108 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); 1112 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
1109 if (rdev->supply == NULL) { 1113 if (rdev->supply == NULL) {
1110 err = -ENOMEM; 1114 err = -ENOMEM;
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1381 } 1385 }
1382 1386
1383 if (!r) { 1387 if (!r) {
1384 dev_err(dev, "Failed to resolve %s-supply for %s\n", 1388 if (have_full_constraints()) {
1385 rdev->supply_name, rdev->desc->name); 1389 r = dummy_regulator_rdev;
1386 return -EPROBE_DEFER; 1390 } else {
1391 dev_err(dev, "Failed to resolve %s-supply for %s\n",
1392 rdev->supply_name, rdev->desc->name);
1393 return -EPROBE_DEFER;
1394 }
1387 } 1395 }
1388 1396
1389 /* Recursively resolve the supply of the supply */ 1397 /* Recursively resolve the supply of the supply */
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1398 /* Cascade always-on state to supply */ 1406 /* Cascade always-on state to supply */
1399 if (_regulator_is_enabled(rdev)) { 1407 if (_regulator_is_enabled(rdev)) {
1400 ret = regulator_enable(rdev->supply); 1408 ret = regulator_enable(rdev->supply);
1401 if (ret < 0) 1409 if (ret < 0) {
1410 if (rdev->supply)
1411 _regulator_put(rdev->supply);
1402 return ret; 1412 return ret;
1413 }
1403 } 1414 }
1404 1415
1405 return 0; 1416 return 0;
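
The regulator core hunks take a module reference on the supplying regulator before creating the consumer link, and fall back to the dummy regulator when full constraints are declared. A sketch of the reference-counting half of that pattern, with illustrative types and names around try_module_get()/module_put():

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>

struct supply_link {
	struct module *owner;
};

/* Pin the supplier's module before exposing it; undo the reference on failure. */
static struct supply_link *link_to_supply(struct module *owner)
{
	struct supply_link *link;

	if (!try_module_get(owner))
		return ERR_PTR(-ENODEV);

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		module_put(owner);
		return ERR_PTR(-ENOMEM);
	}
	link->owner = owner;
	return link;
}
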
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..e94ddcf97722 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; 450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
451 451
452 if (of_property_read_bool(np, "maxim,enable-bias-control")) 452 if (of_property_read_bool(np, "maxim,enable-bias-control"))
453 pdata->control_flags |= MAX8973_BIAS_ENABLE; 453 pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
454 454
455 return pdata; 455 return pdata;
456} 456}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
34#include <linux/mfd/samsung/s2mps14.h> 34#include <linux/mfd/samsung/s2mps14.h>
35#include <linux/mfd/samsung/s2mpu02.h> 35#include <linux/mfd/samsung/s2mpu02.h>
36 36
37/* The highest number of possible regulators for supported devices. */
38#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
37struct s2mps11_info { 39struct s2mps11_info {
38 unsigned int rdev_num; 40 unsigned int rdev_num;
39 int ramp_delay2; 41 int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
49 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether 51 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
50 * the suspend mode was enabled. 52 * the suspend mode was enabled.
51 */ 53 */
52 unsigned long long s2mps14_suspend_state:50; 54 DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
53 55
54 /* Array of size rdev_num with GPIO-s for external sleep control */ 56 /* Array of size rdev_num with GPIO-s for external sleep control */
55 int *ext_control_gpio; 57 int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
500 switch (s2mps11->dev_type) { 502 switch (s2mps11->dev_type) {
501 case S2MPS13X: 503 case S2MPS13X:
502 case S2MPS14X: 504 case S2MPS14X:
503 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 505 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
504 val = S2MPS14_ENABLE_SUSPEND; 506 val = S2MPS14_ENABLE_SUSPEND;
505 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) 507 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
506 val = S2MPS14_ENABLE_EXT_CONTROL; 508 val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
508 val = rdev->desc->enable_mask; 510 val = rdev->desc->enable_mask;
509 break; 511 break;
510 case S2MPU02: 512 case S2MPU02:
511 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 513 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
512 val = S2MPU02_ENABLE_SUSPEND; 514 val = S2MPU02_ENABLE_SUSPEND;
513 else 515 else
514 val = rdev->desc->enable_mask; 516 val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
562 if (ret < 0) 564 if (ret < 0)
563 return ret; 565 return ret;
564 566
565 s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); 567 set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
566 /* 568 /*
567 * Don't enable suspend mode if regulator is already disabled because 569 * Don't enable suspend mode if regulator is already disabled because
568 * this would effectively for a short time turn on the regulator after 570 * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
960 case S2MPS11X: 962 case S2MPS11X:
961 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); 963 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
962 regulators = s2mps11_regulators; 964 regulators = s2mps11_regulators;
965 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
963 break; 966 break;
964 case S2MPS13X: 967 case S2MPS13X:
965 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); 968 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
966 regulators = s2mps13_regulators; 969 regulators = s2mps13_regulators;
970 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
967 break; 971 break;
968 case S2MPS14X: 972 case S2MPS14X:
969 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); 973 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
970 regulators = s2mps14_regulators; 974 regulators = s2mps14_regulators;
975 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
971 break; 976 break;
972 case S2MPU02: 977 case S2MPU02:
973 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); 978 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
974 regulators = s2mpu02_regulators; 979 regulators = s2mpu02_regulators;
980 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
975 break; 981 break;
976 default: 982 default:
977 dev_err(&pdev->dev, "Invalid device type: %u\n", 983 dev_err(&pdev->dev, "Invalid device type: %u\n",
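
The s2mps11 change replaces a 50-bit wide bitfield with DECLARE_BITMAP() plus set_bit()/test_bit(), and the BUILD_BUG_ON() calls guard that the map covers every supported variant's regulator count. A minimal sketch of the bitmap form (MY_REGULATOR_MAX and struct my_pmic are illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MY_REGULATOR_MAX	64	/* illustrative upper bound */

struct my_pmic {
	/* One bit per regulator; no overflow once the count outgrows a bitfield. */
	DECLARE_BITMAP(suspend_state, MY_REGULATOR_MAX);
};

static void mark_suspended(struct my_pmic *pmic, int id)
{
	set_bit(id, pmic->suspend_state);
}

static bool is_suspended(struct my_pmic *pmic, int id)
{
	return test_bit(id, pmic->suspend_state);
}
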
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 4b62d1a875e4..2b08cac62f07 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -88,7 +88,7 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
88{ 88{
89 struct armada38x_rtc *rtc = dev_get_drvdata(dev); 89 struct armada38x_rtc *rtc = dev_get_drvdata(dev);
90 int ret = 0; 90 int ret = 0;
91 unsigned long time, flags; 91 unsigned long time;
92 92
93 ret = rtc_tm_to_time(tm, &time); 93 ret = rtc_tm_to_time(tm, &time);
94 94
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index c0090b698ff3..eab230be5a54 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
343 goto out_dispose_irq; 343 goto out_dispose_irq;
344 } 344 }
345 345
346 device_init_wakeup(&pdev->dev, 1);
347
346 rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev, 348 rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev,
347 &mtk_rtc_ops, THIS_MODULE); 349 &mtk_rtc_ops, THIS_MODULE);
348 if (IS_ERR(rtc->rtc_dev)) { 350 if (IS_ERR(rtc->rtc_dev)) {
@@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev)
351 goto out_free_irq; 353 goto out_free_irq;
352 } 354 }
353 355
354 device_init_wakeup(&pdev->dev, 1);
355
356 return 0; 356 return 0;
357 357
358out_free_irq: 358out_free_irq:
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ 5obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
6 6
7drivers-y += drivers/s390/built-in.o 7drivers-y += drivers/s390/built-in.o
8 8
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1aec8ff0b587..f73d2f579a7e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1863,6 +1863,33 @@ static void __dasd_device_check_expire(struct dasd_device *device)
1863} 1863}
1864 1864
1865/* 1865/*
1866 * return 1 when device is not eligible for IO
1867 */
1868static int __dasd_device_is_unusable(struct dasd_device *device,
1869 struct dasd_ccw_req *cqr)
1870{
1871 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
1872
1873 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1874 /* dasd is being set offline. */
1875 return 1;
1876 }
1877 if (device->stopped) {
1878 if (device->stopped & mask) {
1879 /* stopped and CQR will not change that. */
1880 return 1;
1881 }
1882 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1883 /* CQR is not able to change device to
1884 * operational. */
1885 return 1;
1886 }
1887 /* CQR required to get device operational. */
1888 }
1889 return 0;
1890}
1891
1892/*
1866 * Take a look at the first request on the ccw queue and check 1893 * Take a look at the first request on the ccw queue and check
1867 * if it needs to be started. 1894 * if it needs to be started.
1868 */ 1895 */
@@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device)
1876 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1903 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1877 if (cqr->status != DASD_CQR_QUEUED) 1904 if (cqr->status != DASD_CQR_QUEUED)
1878 return; 1905 return;
1879 /* when device is stopped, return request to previous layer 1906 /* if device is not usable return request to upper layer */
1880 * exception: only the disconnect or unresumed bits are set and the 1907 if (__dasd_device_is_unusable(device, cqr)) {
1881 * cqr is a path verification request
1882 */
1883 if (device->stopped &&
1884 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
1885 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
1886 cqr->intrc = -EAGAIN; 1908 cqr->intrc = -EAGAIN;
1887 cqr->status = DASD_CQR_CLEARED; 1909 cqr->status = DASD_CQR_CLEARED;
1888 dasd_schedule_device_bh(device); 1910 dasd_schedule_device_bh(device);
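
The dasd change lifts a dense compound condition out of __dasd_device_start_head() into the named predicate __dasd_device_is_unusable(), where each reason for rejecting a request gets its own early return and comment. A generic illustration of the same refactor with hypothetical flag names:

#define DEV_OFFLINE	0x01	/* hypothetical device flag */
#define DEV_STOPPED	0x02	/* hypothetical device flag */
#define REQ_CAN_RESTART	0x04	/* hypothetical request flag */

static int device_is_unusable(unsigned int dev_flags, unsigned int req_flags)
{
	if (dev_flags & DEV_OFFLINE)
		return 1;		/* device is being taken offline */
	if (dev_flags & DEV_STOPPED) {
		if (!(req_flags & REQ_CAN_RESTART))
			return 1;	/* stopped and this request cannot fix that */
		/* a restart-capable request may still be started */
	}
	return 0;
}
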
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index a2597e683e79..ee3a6faae22a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
699 struct dasd_device, alias_list); 699 struct dasd_device, alias_list);
700 spin_unlock_irqrestore(&lcu->lock, flags); 700 spin_unlock_irqrestore(&lcu->lock, flags);
701 alias_priv = (struct dasd_eckd_private *) alias_device->private; 701 alias_priv = (struct dasd_eckd_private *) alias_device->private;
702 if ((alias_priv->count < private->count) && !alias_device->stopped) 702 if ((alias_priv->count < private->count) && !alias_device->stopped &&
703 !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
703 return alias_device; 704 return alias_device;
704 else 705 else
705 return NULL; 706 return NULL;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index aeed7969fd79..7bc6df3100ef 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -7,6 +7,7 @@
7#define KMSG_COMPONENT "sclp_early" 7#define KMSG_COMPONENT "sclp_early"
8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 9
10#include <linux/errno.h>
10#include <asm/ctl_reg.h> 11#include <asm/ctl_reg.h>
11#include <asm/sclp.h> 12#include <asm/sclp.h>
12#include <asm/ipl.h> 13#include <asm/ipl.h>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 08f1830cbfc4..01bf1f5cf2e9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
54 "Copyright IBM Corp. 2001, 2012"); 54 "Copyright IBM Corp. 2001, 2012");
55MODULE_LICENSE("GPL"); 55MODULE_LICENSE("GPL");
56 56
57static int zcrypt_hwrng_seed = 1;
58module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
59MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
60
57static DEFINE_SPINLOCK(zcrypt_device_lock); 61static DEFINE_SPINLOCK(zcrypt_device_lock);
58static LIST_HEAD(zcrypt_device_list); 62static LIST_HEAD(zcrypt_device_list);
59static int zcrypt_device_count = 0; 63static int zcrypt_device_count = 0;
@@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
1373static struct hwrng zcrypt_rng_dev = { 1377static struct hwrng zcrypt_rng_dev = {
1374 .name = "zcrypt", 1378 .name = "zcrypt",
1375 .data_read = zcrypt_rng_data_read, 1379 .data_read = zcrypt_rng_data_read,
1380 .quality = 990,
1376}; 1381};
1377 1382
1378static int zcrypt_rng_device_add(void) 1383static int zcrypt_rng_device_add(void)
@@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void)
1387 goto out; 1392 goto out;
1388 } 1393 }
1389 zcrypt_rng_buffer_index = 0; 1394 zcrypt_rng_buffer_index = 0;
1395 if (!zcrypt_hwrng_seed)
1396 zcrypt_rng_dev.quality = 0;
1390 rc = hwrng_register(&zcrypt_rng_dev); 1397 rc = hwrng_register(&zcrypt_rng_dev);
1391 if (rc) 1398 if (rc)
1392 goto out_free; 1399 goto out_free;
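The new hwrng_seed parameter decides whether the zcrypt RNG advertises a non-zero quality to the hwrng core before registration; a zero quality opts the device out of automatic entropy-pool seeding. A simplified userspace sketch of that gating, with a stub standing in for hwrng_register():

#include <stdio.h>

/* stand-ins for the module parameter and the hwrng descriptor */
static int hwrng_seed = 1;            /* 1: allow auto seeding (default) */

struct hwrng_sketch {
	const char *name;
	unsigned short quality;        /* 0 = never used for seeding */
};

static struct hwrng_sketch rng_dev = {
	.name    = "zcrypt",
	.quality = 990,
};

static int register_rng_sketch(struct hwrng_sketch *rng)
{
	/* the real driver calls hwrng_register(); here we only print */
	printf("registering %s with quality %u\n", rng->name, rng->quality);
	return 0;
}

int main(void)
{
	if (!hwrng_seed)
		rng_dev.quality = 0;   /* opt out of entropy-pool seeding */
	return register_rng_sketch(&rng_dev);
}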
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c351624..ce129e595b55 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
39 39
40#define DRV_NAME "fnic" 40#define DRV_NAME "fnic"
41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
42#define DRV_VERSION "1.6.0.17" 42#define DRV_VERSION "1.6.0.17a"
43#define PFX DRV_NAME ": " 43#define PFX DRV_NAME ": "
44#define DFX DRV_NAME "%d: " 44#define DFX DRV_NAME "%d: "
45 45
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286f1a9d..25436cd2860c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
425 unsigned long ptr; 425 unsigned long ptr;
426 struct fc_rport_priv *rdata; 426 struct fc_rport_priv *rdata;
427 spinlock_t *io_lock = NULL; 427 spinlock_t *io_lock = NULL;
428 int io_lock_acquired = 0;
428 429
429 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) 430 if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
430 return SCSI_MLQUEUE_HOST_BUSY; 431 return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
518 spin_lock_irqsave(io_lock, flags); 519 spin_lock_irqsave(io_lock, flags);
519 520
520 /* initialize rest of io_req */ 521 /* initialize rest of io_req */
522 io_lock_acquired = 1;
521 io_req->port_id = rport->port_id; 523 io_req->port_id = rport->port_id;
522 io_req->start_time = jiffies; 524 io_req->start_time = jiffies;
523 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; 525 CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
571 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); 573 (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
572 574
573 /* if only we issued IO, will we have the io lock */ 575 /* if only we issued IO, will we have the io lock */
574 if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED) 576 if (io_lock_acquired)
575 spin_unlock_irqrestore(io_lock, flags); 577 spin_unlock_irqrestore(io_lock, flags);
576 578
577 atomic_dec(&fnic->in_flight); 579 atomic_dec(&fnic->in_flight);
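Rather than inferring lock ownership from FNIC_IO_INITIALIZED in CMD_FLAGS(sc), the queuecommand path now records in a local flag whether it actually took the per-IO lock and unlocks only in that case. A small sketch of the flag-guarded unlock pattern, with a pthread mutex standing in for the spinlock and hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t io_lock = PTHREAD_MUTEX_INITIALIZER;

static int submit_io(int need_lock)
{
	int io_lock_acquired = 0;

	if (need_lock) {
		pthread_mutex_lock(&io_lock);
		io_lock_acquired = 1;
		/* ... initialize the io_req under the lock ... */
	}

	/* exit path: unlock only if this path actually took the lock */
	if (io_lock_acquired)
		pthread_mutex_unlock(&io_lock);
	return 0;
}

int main(void)
{
	submit_io(1);
	submit_io(0);
	puts("done");
	return 0;
}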
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599{ 599{
600 struct ipr_trace_entry *trace_entry; 600 struct ipr_trace_entry *trace_entry;
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
602 unsigned int trace_index;
602 603
603 trace_entry = &ioa_cfg->trace[atomic_add_return 604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
604 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; 605 trace_entry = &ioa_cfg->trace[trace_index];
605 trace_entry->time = jiffies; 606 trace_entry->time = jiffies;
606 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
607 trace_entry->type = type; 608 trace_entry->type = type;
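The trace fix replaces a modulo of the post-increment counter with a bitwise AND against IPR_TRACE_INDEX_MASK (entries - 1); the masked form stays in range even after the signed counter wraps, and is only valid because IPR_NUM_TRACE_ENTRIES is a power of two. A standalone illustration of the difference:

#include <stdio.h>

#define NUM_TRACE_ENTRIES (1 << 8)               /* must be a power of two */
#define TRACE_INDEX_MASK  (NUM_TRACE_ENTRIES - 1)

int main(void)
{
	int counter = -5;                        /* wrapped signed counter */

	/* '%' on a negative int yields a negative remainder in C ... */
	printf("modulo index: %d\n", counter % NUM_TRACE_ENTRIES);   /* -5 */

	/* ... while masking always lands inside the ring buffer */
	printf("masked index: %u\n", (unsigned int)counter & TRACE_INDEX_MASK); /* 251 */
	return 0;
}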
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1051 1052
1052static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) 1053static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1053{ 1054{
1055 unsigned int hrrq;
1056
1054 if (ioa_cfg->hrrq_num == 1) 1057 if (ioa_cfg->hrrq_num == 1)
1055 return 0; 1058 hrrq = 0;
1056 else 1059 else {
1057 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; 1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1062 }
1063 return hrrq;
1058} 1064}
1059 1065
1060/** 1066/**
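ipr_get_hrrq_index() similarly moves the arithmetic into an unsigned local, so the round-robin index stays within 1..hrrq_num-1 (queue 0 is reserved) even when the counter wraps. A quick sketch with an illustrative hrrq_num:

#include <stdio.h>

int main(void)
{
	unsigned int hrrq_num = 4;       /* queue 0 reserved, 1..3 shared */
	int counter = -3;                /* wrapped signed counter value */

	/* doing the modulo in unsigned arithmetic keeps the result in range */
	unsigned int hrrq = (unsigned int)counter;
	hrrq = (hrrq % (hrrq_num - 1)) + 1;

	printf("selected hrrq: %u\n", hrrq);   /* always within 1..3 */
	return 0;
}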
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6270 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6271 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266 unsigned long hrrq_flags; 6272 unsigned long lock_flags;
6267 6273
6268 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6274 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269 6275
6270 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6276 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271 scsi_dma_unmap(scsi_cmd); 6277 scsi_dma_unmap(scsi_cmd);
6272 6278
6273 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6274 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275 scsi_cmd->scsi_done(scsi_cmd); 6281 scsi_cmd->scsi_done(scsi_cmd);
6276 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6277 } else { 6283 } else {
6278 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6285 spin_lock(&ipr_cmd->hrrq->_lock);
6279 ipr_erp_start(ioa_cfg, ipr_cmd); 6286 ipr_erp_start(ioa_cfg, ipr_cmd);
6280 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6287 spin_unlock(&ipr_cmd->hrrq->_lock);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6281 } 6289 }
6282} 6290}
6283 6291
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
1486 1486
1487#define IPR_NUM_TRACE_INDEX_BITS 8 1487#define IPR_NUM_TRACE_INDEX_BITS 8
1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) 1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
1489#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
1489#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) 1490#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
1490 char trace_start[8]; 1491 char trace_start[8];
1491#define IPR_TRACE_START_LABEL "trace" 1492#define IPR_TRACE_START_LABEL "trace"
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
733 if (resp) { 733 if (resp) {
734 resp(sp, fp, arg); 734 resp(sp, fp, arg);
735 res = true; 735 res = true;
736 } else if (!IS_ERR(fp)) {
737 fc_frame_free(fp);
738 } 736 }
739 737
740 spin_lock_bh(&ep->ex_lock); 738 spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1596 * If new exch resp handler is valid then call that 1594 * If new exch resp handler is valid then call that
1597 * first. 1595 * first.
1598 */ 1596 */
1599 fc_invoke_resp(ep, sp, fp); 1597 if (!fc_invoke_resp(ep, sp, fp))
1598 fc_frame_free(fp);
1600 1599
1601 fc_exch_release(ep); 1600 fc_exch_release(ep);
1602 return; 1601 return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1695 fc_exch_hold(ep); 1694 fc_exch_hold(ep);
1696 if (!rc) 1695 if (!rc)
1697 fc_exch_delete(ep); 1696 fc_exch_delete(ep);
1698 fc_invoke_resp(ep, sp, fp); 1697 if (!fc_invoke_resp(ep, sp, fp))
1698 fc_frame_free(fp);
1699 if (has_rec) 1699 if (has_rec)
1700 fc_exch_timer_set(ep, ep->r_a_tov); 1700 fc_exch_timer_set(ep, ep->r_a_tov);
1701 fc_exch_release(ep); 1701 fc_exch_release(ep);
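fc_invoke_resp() no longer frees the frame itself when no handler is installed; it now reports whether a handler ran, and each caller frees the frame on a false return. A standalone sketch of that ownership convention, using a stub frame type and hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct frame { int id; };

typedef void (*resp_handler)(struct frame *fp);

/* returns true when a handler consumed the frame, false otherwise */
static bool invoke_resp(resp_handler resp, struct frame *fp)
{
	if (resp) {
		resp(fp);
		return true;
	}
	return false;
}

static void my_handler(struct frame *fp)
{
	printf("handled frame %d\n", fp->id);
	free(fp);                       /* handler owns the frame */
}

int main(void)
{
	struct frame *fp = malloc(sizeof(*fp));
	fp->id = 1;
	/* caller frees the frame only when no handler took ownership */
	if (!invoke_resp(NULL, fp))
		free(fp);

	fp = malloc(sizeof(*fp));
	fp->id = 2;
	if (!invoke_resp(my_handler, fp))
		free(fp);
	return 0;
}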
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..2d5909c4685c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
1039 fc_fcp_pkt_hold(fsp); 1039 fc_fcp_pkt_hold(fsp);
1040 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1040 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1041 1041
1042 if (!fc_fcp_lock_pkt(fsp)) { 1042 spin_lock_bh(&fsp->scsi_pkt_lock);
1043 if (!(fsp->state & FC_SRB_COMPL)) {
1044 fsp->state |= FC_SRB_COMPL;
1045 /*
1046 * TODO: dropping scsi_pkt_lock and then reacquiring
1047 * again around fc_fcp_cleanup_cmd() is required,
1048 * since fc_fcp_cleanup_cmd() calls into
1049 * fc_seq_set_resp(), and that function can sleep via
1050 * schedule(). Maybe schedule() and related code should be
1051 * removed instead of unlocking here, to avoid a
1052 * scheduling-while-atomic bug.
1053 */
1054 spin_unlock_bh(&fsp->scsi_pkt_lock);
1055
1043 fc_fcp_cleanup_cmd(fsp, error); 1056 fc_fcp_cleanup_cmd(fsp, error);
1057
1058 spin_lock_bh(&fsp->scsi_pkt_lock);
1044 fc_io_compl(fsp); 1059 fc_io_compl(fsp);
1045 fc_fcp_unlock_pkt(fsp);
1046 } 1060 }
1061 spin_unlock_bh(&fsp->scsi_pkt_lock);
1047 1062
1048 fc_fcp_pkt_release(fsp); 1063 fc_fcp_pkt_release(fsp);
1049 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1064 spin_lock_irqsave(&si->scsi_queue_lock, flags);
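The completion path above marks FC_SRB_COMPL under scsi_pkt_lock, drops the lock around fc_fcp_cleanup_cmd() (which can end up sleeping via fc_seq_set_resp()), and retakes it for fc_io_compl(), avoiding a scheduling-while-atomic bug. A userspace sketch of the drop-and-retake pattern around a sleeping call, with a pthread mutex standing in for the lock:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pkt_lock = PTHREAD_MUTEX_INITIALIZER;
static int srb_compl;

static void cleanup_cmd(void)
{
	/* stands in for fc_fcp_cleanup_cmd(): may block, so it must not
	 * run with the packet lock held */
	usleep(1000);
}

static void complete_pkt(void)
{
	pthread_mutex_lock(&pkt_lock);
	if (!srb_compl) {
		srb_compl = 1;

		pthread_mutex_unlock(&pkt_lock);  /* drop around the sleeper */
		cleanup_cmd();
		pthread_mutex_lock(&pkt_lock);    /* retake for completion */

		printf("io completed\n");
	}
	pthread_mutex_unlock(&pkt_lock);
}

int main(void)
{
	complete_pkt();
	return 0;
}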
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..98d9bb6ff725 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2941{ 2941{
2942 struct iscsi_conn *conn = cls_conn->dd_data; 2942 struct iscsi_conn *conn = cls_conn->dd_data;
2943 struct iscsi_session *session = conn->session; 2943 struct iscsi_session *session = conn->session;
2944 unsigned long flags;
2945 2944
2946 del_timer_sync(&conn->transport_timer); 2945 del_timer_sync(&conn->transport_timer);
2947 2946
2947 mutex_lock(&session->eh_mutex);
2948 spin_lock_bh(&session->frwd_lock); 2948 spin_lock_bh(&session->frwd_lock);
2949 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 2949 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2950 if (session->leadconn == conn) { 2950 if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2956 } 2956 }
2957 spin_unlock_bh(&session->frwd_lock); 2957 spin_unlock_bh(&session->frwd_lock);
2958 2958
2959 /*
2960 * Block until all in-progress commands for this connection
2961 * time out or fail.
2962 */
2963 for (;;) {
2964 spin_lock_irqsave(session->host->host_lock, flags);
2965 if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
2966 spin_unlock_irqrestore(session->host->host_lock, flags);
2967 break;
2968 }
2969 spin_unlock_irqrestore(session->host->host_lock, flags);
2970 msleep_interruptible(500);
2971 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2972 "host_busy %d host_failed %d\n",
2973 atomic_read(&session->host->host_busy),
2974 session->host->host_failed);
2975 /*
2976 * force eh_abort() to unblock
2977 */
2978 wake_up(&conn->ehwait);
2979 }
2980
2981 /* flush queued up work because we free the connection below */ 2959 /* flush queued up work because we free the connection below */
2982 iscsi_suspend_tx(conn); 2960 iscsi_suspend_tx(conn);
2983 2961
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2994 if (session->leadconn == conn) 2972 if (session->leadconn == conn)
2995 session->leadconn = NULL; 2973 session->leadconn = NULL;
2996 spin_unlock_bh(&session->frwd_lock); 2974 spin_unlock_bh(&session->frwd_lock);
2975 mutex_unlock(&session->eh_mutex);
2997 2976
2998 iscsi_destroy_conn(cls_conn); 2977 iscsi_destroy_conn(cls_conn);
2999} 2978}
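Instead of polling host_busy until outstanding commands drain, iscsi_conn_teardown() now holds the session's eh_mutex for the whole teardown, serializing it directly against the error-handling paths. A minimal sketch of replacing a busy-wait with mutex-based serialization (hypothetical names):

#include <pthread.h>
#include <stdio.h>

/* eh_mutex stands in for session->eh_mutex: both the error handler and
 * connection teardown take it, so teardown no longer needs to poll. */
static pthread_mutex_t eh_mutex = PTHREAD_MUTEX_INITIALIZER;

static void error_handler(void)
{
	pthread_mutex_lock(&eh_mutex);
	/* ... abort or recover outstanding commands ... */
	pthread_mutex_unlock(&eh_mutex);
}

static void conn_teardown(void)
{
	pthread_mutex_lock(&eh_mutex);
	/* ... unbind and free the connection; cannot race error_handler ... */
	printf("connection torn down\n");
	pthread_mutex_unlock(&eh_mutex);
}

int main(void)
{
	error_handler();
	conn_teardown();
	return 0;
}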
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..437254e1c4de 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
738 ql_log(ql_log_info, vha, 0x706f, 738 ql_log(ql_log_info, vha, 0x706f,
739 "Issuing MPI reset.\n"); 739 "Issuing MPI reset.\n");
740 740
741 if (IS_QLA83XX(ha)) { 741 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
742 uint32_t idc_control; 742 uint32_t idc_control;
743 743
744 qla83xx_idc_lock(vha, 0); 744 qla83xx_idc_lock(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..8b011aef12bd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -67,10 +67,10 @@
67 * | | | 0xd031-0xd0ff | 67 * | | | 0xd031-0xd0ff |
68 * | | | 0xd101-0xd1fe | 68 * | | | 0xd101-0xd1fe |
69 * | | | 0xd214-0xd2fe | 69 * | | | 0xd214-0xd2fe |
70 * | Target Mode | 0xe079 | | 70 * | Target Mode | 0xe080 | |
71 * | Target Mode Management | 0xf072 | 0xf002 | 71 * | Target Mode Management | 0xf096 | 0xf002 |
72 * | | | 0xf046-0xf049 | 72 * | | | 0xf046-0xf049 |
73 * | Target Mode Task Management | 0x1000b | | 73 * | Target Mode Task Management | 0x1000d | |
74 * ---------------------------------------------------------------------- 74 * ----------------------------------------------------------------------
75 */ 75 */
76 76
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..9ad819edcd67 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ 274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
275 275
276struct req_que; 276struct req_que;
277struct qla_tgt_sess;
277 278
278/* 279/*
279 * (sd.h is not exported, hence local inclusion) 280 * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
2026 uint16_t port_id; 2027 uint16_t port_id;
2027 2028
2028 unsigned long retry_delay_timestamp; 2029 unsigned long retry_delay_timestamp;
2030 struct qla_tgt_sess *tgt_session;
2029} fc_port_t; 2031} fc_port_t;
2030 2032
2031#include "qla_mr.h" 2033#include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
3154/* Bit 21 of fw_attributes decides the MCTP capabilities */ 3156/* Bit 21 of fw_attributes decides the MCTP capabilities */
3155#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 3157#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
3156 ((ha)->fw_attributes_ext[0] & BIT_0)) 3158 ((ha)->fw_attributes_ext[0] & BIT_0))
3157#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) 3159#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3158#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) 3160#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3159#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) 3161#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
3160#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) 3162#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3161#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ 3163#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
3162 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) 3164 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
3163#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) 3165#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3164#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) 3166#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
3165#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) 3167#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
3166#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3168#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
3579 uint16_t fcoe_fcf_idx; 3581 uint16_t fcoe_fcf_idx;
3580 uint8_t fcoe_vn_port_mac[6]; 3582 uint8_t fcoe_vn_port_mac[6];
3581 3583
3584 /* list of commands waiting on workqueue */
3585 struct list_head qla_cmd_list;
3586 struct list_head qla_sess_op_cmd_list;
3587 spinlock_t cmd_list_lock;
3588
3589 /* Counter to detect races between ELS and RSCN events */
3590 atomic_t generation_tick;
3591 /* Time when global fcport update has been scheduled */
3592 int total_fcport_update_gen;
3593
3582 uint32_t vp_abort_cnt; 3594 uint32_t vp_abort_cnt;
3583 3595
3584 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 3596 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..11f2f3279eab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
115 QLA_LOGIO_LOGIN_RETRIED : 0; 115 QLA_LOGIO_LOGIN_RETRIED : 0;
116 qla2x00_post_async_login_done_work(fcport->vha, fcport, 116 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117 lio->u.logio.data); 117 lio->u.logio.data);
118 } else if (sp->type == SRB_LOGOUT_CMD) {
119 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
118 } 120 }
119} 121}
120 122
@@ -497,7 +499,10 @@ void
497qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, 499qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
498 uint16_t *data) 500 uint16_t *data)
499{ 501{
500 qla2x00_mark_device_lost(vha, fcport, 1, 0); 502 /* Don't re-login in target mode */
503 if (!fcport->tgt_session)
504 qla2x00_mark_device_lost(vha, fcport, 1, 0);
505 qlt_logo_completion_handler(fcport, data[0]);
501 return; 506 return;
502} 507}
503 508
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1538 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 1543 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1539 sizeof(uint16_t); 1544 sizeof(uint16_t);
1540 } else if (IS_FWI2_CAPABLE(ha)) { 1545 } else if (IS_FWI2_CAPABLE(ha)) {
1541 if (IS_QLA83XX(ha)) 1546 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1542 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 1547 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1543 else if (IS_QLA81XX(ha)) 1548 else if (IS_QLA81XX(ha))
1544 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 1549 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1550 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1555 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1551 sizeof(uint32_t); 1556 sizeof(uint32_t);
1552 if (ha->mqenable) { 1557 if (ha->mqenable) {
1553 if (!IS_QLA83XX(ha)) 1558 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1554 mq_size = sizeof(struct qla2xxx_mq_chain); 1559 mq_size = sizeof(struct qla2xxx_mq_chain);
1555 /* 1560 /*
1556 * Allocate maximum buffer size for all queues. 1561 * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
2922{ 2927{
2923 fc_port_t *fcport = data; 2928 fc_port_t *fcport = data;
2924 struct fc_rport *rport; 2929 struct fc_rport *rport;
2925 scsi_qla_host_t *vha = fcport->vha;
2926 unsigned long flags; 2930 unsigned long flags;
2927 2931
2928 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2932 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2929 rport = fcport->drport ? fcport->drport: fcport->rport; 2933 rport = fcport->drport ? fcport->drport: fcport->rport;
2930 fcport->drport = NULL; 2934 fcport->drport = NULL;
2931 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2935 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2932 if (rport) { 2936 if (rport)
2933 fc_remote_port_delete(rport); 2937 fc_remote_port_delete(rport);
2934 /*
2935 * Release the target mode FC NEXUS in qla_target.c code
2936 * if target mod is enabled.
2937 */
2938 qlt_fc_port_deleted(vha, fcport);
2939 }
2940} 2938}
2941 2939
2942/** 2940/**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3303 * Create target mode FC NEXUS in qla_target.c if target mode is 3301 * Create target mode FC NEXUS in qla_target.c if target mode is
3304 * enabled.. 3302 * enabled..
3305 */ 3303 */
3304
3306 qlt_fc_port_added(vha, fcport); 3305 qlt_fc_port_added(vha, fcport);
3307 3306
3308 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 3307 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3341 3340
3342 if (IS_QLAFX00(vha->hw)) { 3341 if (IS_QLAFX00(vha->hw)) {
3343 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3342 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3344 qla2x00_reg_remote_port(vha, fcport); 3343 goto reg_port;
3345 return;
3346 } 3344 }
3347 fcport->login_retry = 0; 3345 fcport->login_retry = 0;
3348 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 3346 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3350 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3348 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3351 qla2x00_iidma_fcport(vha, fcport); 3349 qla2x00_iidma_fcport(vha, fcport);
3352 qla24xx_update_fcport_fcp_prio(vha, fcport); 3350 qla24xx_update_fcport_fcp_prio(vha, fcport);
3353 qla2x00_reg_remote_port(vha, fcport); 3351
3352reg_port:
3353 if (qla_ini_mode_enabled(vha))
3354 qla2x00_reg_remote_port(vha, fcport);
3355 else {
3356 /*
3357 * Create target mode FC NEXUS in qla_target.c
3358 */
3359 qlt_fc_port_added(vha, fcport);
3360 }
3354} 3361}
3355 3362
3356/* 3363/*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3375 LIST_HEAD(new_fcports); 3382 LIST_HEAD(new_fcports);
3376 struct qla_hw_data *ha = vha->hw; 3383 struct qla_hw_data *ha = vha->hw;
3377 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3384 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3385 int discovery_gen;
3378 3386
3379 /* If FL port exists, then SNS is present */ 3387 /* If FL port exists, then SNS is present */
3380 if (IS_FWI2_CAPABLE(ha)) 3388 if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3445 fcport->scan_state = QLA_FCPORT_SCAN; 3453 fcport->scan_state = QLA_FCPORT_SCAN;
3446 } 3454 }
3447 3455
3456 /* Mark the time right before querying FW for connected ports.
3457 * This process is long, asynchronous and by the time it's done,
3458 * the collected information might not be accurate anymore. E.g. a
3459 * disconnected port might have re-connected and a brand new
3460 * session been created. In that case the session's generation
3461 * will be newer than discovery_gen. */
3462 qlt_do_generation_tick(vha, &discovery_gen);
3463
3448 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3449 if (rval != QLA_SUCCESS) 3465 if (rval != QLA_SUCCESS)
3450 break; 3466 break;
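The comment above describes the scheme: a generation counter is sampled right before the slow, asynchronous fabric scan, and any session whose own generation is newer than that sample is treated as created after the scan started, so a stale "port gone" deletion is ignored. A standalone sketch of the comparison, assuming hypothetical names (the real driver uses atomic_inc_return() plus wmb()):

#include <stdio.h>

static int generation_tick;

static int do_generation_tick(void)
{
	return ++generation_tick;       /* atomic_inc_return() in the driver */
}

struct sess { int generation; };

static void port_deleted(struct sess *s, int max_gen)
{
	/* deletion is only valid for sessions created at or before the
	 * generation sampled when the scan began */
	if (max_gen - s->generation < 0) {
		printf("stale deletion ignored (req_gen %d, sess_gen %d)\n",
		       max_gen, s->generation);
		return;
	}
	printf("scheduling session for deletion\n");
}

int main(void)
{
	int discovery_gen = do_generation_tick();   /* sampled before the scan */

	struct sess old_sess = { .generation = discovery_gen };
	struct sess new_sess = { .generation = do_generation_tick() }; /* re-login mid-scan */

	port_deleted(&old_sess, discovery_gen);     /* deleted */
	port_deleted(&new_sess, discovery_gen);     /* ignored as stale */
	return 0;
}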
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3460 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3461 continue; 3477 continue;
3462 3478
3463 if (fcport->scan_state == QLA_FCPORT_SCAN && 3479 if (fcport->scan_state == QLA_FCPORT_SCAN) {
3464 atomic_read(&fcport->state) == FCS_ONLINE) { 3480 if (qla_ini_mode_enabled(base_vha) &&
3465 qla2x00_mark_device_lost(vha, fcport, 3481 atomic_read(&fcport->state) == FCS_ONLINE) {
3466 ql2xplogiabsentdevice, 0); 3482 qla2x00_mark_device_lost(vha, fcport,
3467 if (fcport->loop_id != FC_NO_LOOP_ID && 3483 ql2xplogiabsentdevice, 0);
3468 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3484 if (fcport->loop_id != FC_NO_LOOP_ID &&
3469 fcport->port_type != FCT_INITIATOR && 3485 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3470 fcport->port_type != FCT_BROADCAST) { 3486 fcport->port_type != FCT_INITIATOR &&
3471 ha->isp_ops->fabric_logout(vha, 3487 fcport->port_type != FCT_BROADCAST) {
3472 fcport->loop_id, 3488 ha->isp_ops->fabric_logout(vha,
3473 fcport->d_id.b.domain, 3489 fcport->loop_id,
3474 fcport->d_id.b.area, 3490 fcport->d_id.b.domain,
3475 fcport->d_id.b.al_pa); 3491 fcport->d_id.b.area,
3476 qla2x00_clear_loop_id(fcport); 3492 fcport->d_id.b.al_pa);
3493 qla2x00_clear_loop_id(fcport);
3494 }
3495 } else if (!qla_ini_mode_enabled(base_vha)) {
3496 /*
3497 * In target mode, explicitly kill
3498 * sessions and log out of devices
3499 * that are gone, so that we don't
3500 * end up with an initiator using the
3501 * wrong ACL (if the fabric recycles
3502 * an FC address and we have a stale
3503 * session around) and so that we don't
3504 * report initiators that are no longer
3505 * on the fabric.
3506 */
3507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3508 "port gone, logging out/killing session: "
3509 "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3510 "scan_state %d\n",
3511 fcport->port_name,
3512 atomic_read(&fcport->state),
3513 fcport->flags, fcport->fc4_type,
3514 fcport->scan_state);
3515 qlt_fc_port_deleted(vha, fcport,
3516 discovery_gen);
3477 } 3517 }
3478 } 3518 }
3479 } 3519 }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3494 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 3534 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3495 continue; 3535 continue;
3496 3536
3537 /*
3538 * If we're not an initiator, skip looking for devices
3539 * and logging in. There's no reason for us to do it,
3540 * and it seems to actively cause problems in target
3541 * mode if we race with the initiator logging into us
3542 * (we might get the "port ID used" status back from
3543 * our login command and log out the initiator, which
3544 * seems to cause havoc).
3545 */
3546 if (!qla_ini_mode_enabled(base_vha)) {
3547 if (fcport->scan_state == QLA_FCPORT_FOUND) {
3548 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3549 "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3550 "scan_state %d (initiator mode disabled; skipping "
3551 "login)\n", fcport->port_name,
3552 atomic_read(&fcport->state),
3553 fcport->flags, fcport->fc4_type,
3554 fcport->scan_state);
3555 }
3556 continue;
3557 }
3558
3497 if (fcport->loop_id == FC_NO_LOOP_ID) { 3559 if (fcport->loop_id == FC_NO_LOOP_ID) {
3498 fcport->loop_id = next_loopid; 3560 fcport->loop_id = next_loopid;
3499 rval = qla2x00_find_new_loop_id( 3561 rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3520 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3582 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3521 break; 3583 break;
3522 3584
3523 /* Find a new loop ID to use. */ 3585 /*
3524 fcport->loop_id = next_loopid; 3586 * If we're not an initiator, skip looking for devices
3525 rval = qla2x00_find_new_loop_id(base_vha, fcport); 3587 * and logging in. There's no reason for us to do it,
3526 if (rval != QLA_SUCCESS) { 3588 * and it seems to actively cause problems in target
3527 /* Ran out of IDs to use */ 3589 * mode if we race with the initiator logging into us
3528 break; 3590 * (we might get the "port ID used" status back from
3529 } 3591 * our login command and log out the initiator, which
3592 * seems to cause havoc).
3593 */
3594 if (qla_ini_mode_enabled(base_vha)) {
3595 /* Find a new loop ID to use. */
3596 fcport->loop_id = next_loopid;
3597 rval = qla2x00_find_new_loop_id(base_vha,
3598 fcport);
3599 if (rval != QLA_SUCCESS) {
3600 /* Ran out of IDs to use */
3601 break;
3602 }
3530 3603
3531 /* Login and update database */ 3604 /* Login and update database */
3532 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3605 qla2x00_fabric_dev_login(vha, fcport,
3606 &next_loopid);
3607 } else {
3608 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3609 "new port %8phC state 0x%x flags 0x%x fc4_type "
3610 "0x%x scan_state %d (initiator mode disabled; "
3611 "skipping login)\n",
3612 fcport->port_name,
3613 atomic_read(&fcport->state),
3614 fcport->flags, fcport->fc4_type,
3615 fcport->scan_state);
3616 }
3533 3617
3534 list_move_tail(&fcport->list, &vha->vp_fcports); 3618 list_move_tail(&fcport->list, &vha->vp_fcports);
3535 } 3619 }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3725 fcport->fp_speed = new_fcport->fp_speed; 3809 fcport->fp_speed = new_fcport->fp_speed;
3726 3810
3727 /* 3811 /*
3728 * If address the same and state FCS_ONLINE, nothing 3812 * If address the same and state FCS_ONLINE
3729 * changed. 3813 * (or in target mode), nothing changed.
3730 */ 3814 */
3731 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 3815 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3732 atomic_read(&fcport->state) == FCS_ONLINE) { 3816 (atomic_read(&fcport->state) == FCS_ONLINE ||
3817 !qla_ini_mode_enabled(base_vha))) {
3733 break; 3818 break;
3734 } 3819 }
3735 3820
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3749 * Log it out if still logged in and mark it for 3834 * Log it out if still logged in and mark it for
3750 * relogin later. 3835 * relogin later.
3751 */ 3836 */
3837 if (!qla_ini_mode_enabled(base_vha)) {
3838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3839 "port changed FC ID, %8phC"
3840 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
3841 fcport->port_name,
3842 fcport->d_id.b.domain,
3843 fcport->d_id.b.area,
3844 fcport->d_id.b.al_pa,
3845 fcport->loop_id,
3846 new_fcport->d_id.b.domain,
3847 new_fcport->d_id.b.area,
3848 new_fcport->d_id.b.al_pa);
3849 fcport->d_id.b24 = new_fcport->d_id.b24;
3850 break;
3851 }
3852
3752 fcport->d_id.b24 = new_fcport->d_id.b24; 3853 fcport->d_id.b24 = new_fcport->d_id.b24;
3753 fcport->flags |= FCF_LOGIN_NEEDED; 3854 fcport->flags |= FCF_LOGIN_NEEDED;
3754 if (fcport->loop_id != FC_NO_LOOP_ID && 3855 if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3768 if (found) 3869 if (found)
3769 continue; 3870 continue;
3770 /* If device was not in our fcports list, then add it. */ 3871 /* If device was not in our fcports list, then add it. */
3872 new_fcport->scan_state = QLA_FCPORT_FOUND;
3771 list_add_tail(&new_fcport->list, new_fcports); 3873 list_add_tail(&new_fcport->list, new_fcports);
3772 3874
3773 /* Allocate a new replacement fcport. */ 3875 /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4188 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 4290 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4189 spin_unlock_irqrestore(&ha->vport_slock, flags); 4291 spin_unlock_irqrestore(&ha->vport_slock, flags);
4190 qla2x00_rport_del(fcport); 4292 qla2x00_rport_del(fcport);
4293
4294 /*
4295 * Release the target mode FC NEXUS in
4296 * qla_target.c, if target mod is enabled.
4297 */
4298 qlt_fc_port_deleted(vha, fcport,
4299 base_vha->total_fcport_update_gen);
4300
4191 spin_lock_irqsave(&ha->vport_slock, flags); 4301 spin_lock_irqsave(&ha->vport_slock, flags);
4192 } 4302 }
4193 } 4303 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..6f02b26a35cf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1944 logio->control_flags = 1944 logio->control_flags =
1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1946 if (!sp->fcport->tgt_session ||
1947 !sp->fcport->tgt_session->keep_nport_handle)
1948 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1946 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1949 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1947 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1950 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1948 logio->port_id[1] = sp->fcport->d_id.b.area; 1951 logio->port_id[1] = sp->fcport->d_id.b.area;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..b2f713ad9034 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2415 *orig_iocb_cnt = mcp->mb[10]; 2415 *orig_iocb_cnt = mcp->mb[10];
2416 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2416 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2417 *max_npiv_vports = mcp->mb[11]; 2417 *max_npiv_vports = mcp->mb[11];
2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) 2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2419 IS_QLA27XX(vha->hw)) && max_fcfs)
2419 *max_fcfs = mcp->mb[12]; 2420 *max_fcfs = mcp->mb[12];
2420 } 2421 }
2421 2422
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3898 spin_lock_irqsave(&ha->hardware_lock, flags); 3899 spin_lock_irqsave(&ha->hardware_lock, flags);
3899 if (!(rsp->options & BIT_0)) { 3900 if (!(rsp->options & BIT_0)) {
3900 WRT_REG_DWORD(rsp->rsp_q_out, 0); 3901 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3901 if (!IS_QLA83XX(ha)) 3902 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3902 WRT_REG_DWORD(rsp->rsp_q_in, 0); 3903 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3903 } 3904 }
3904 3905
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5345 mbx_cmd_t *mcp = &mc; 5346 mbx_cmd_t *mcp = &mc;
5346 struct qla_hw_data *ha = vha->hw; 5347 struct qla_hw_data *ha = vha->hw;
5347 5348
5348 if (!IS_QLA83XX(ha)) 5349 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5349 return QLA_FUNCTION_FAILED; 5350 return QLA_FUNCTION_FAILED;
5350 5351
5351 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5352 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..8a5cac8448c7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2504 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2504 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2505 req_length = REQUEST_ENTRY_CNT_24XX; 2505 req_length = REQUEST_ENTRY_CNT_24XX;
2506 rsp_length = RESPONSE_ENTRY_CNT_2300; 2506 rsp_length = RESPONSE_ENTRY_CNT_2300;
2507 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2507 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2508 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2508 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2509 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2509 ha->gid_list_info_size = 8; 2510 ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3229 spin_lock_irqsave(vha->host->host_lock, flags); 3230 spin_lock_irqsave(vha->host->host_lock, flags);
3230 fcport->drport = rport; 3231 fcport->drport = rport;
3231 spin_unlock_irqrestore(vha->host->host_lock, flags); 3232 spin_unlock_irqrestore(vha->host->host_lock, flags);
3233 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3232 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3234 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3233 qla2xxx_wake_dpc(base_vha); 3235 qla2xxx_wake_dpc(base_vha);
3234 } else { 3236 } else {
3235 fc_remote_port_delete(rport); 3237 int now;
3236 qlt_fc_port_deleted(vha, fcport); 3238 if (rport)
3239 fc_remote_port_delete(rport);
3240 qlt_do_generation_tick(vha, &now);
3241 qlt_fc_port_deleted(vha, fcport, now);
3237 } 3242 }
3238} 3243}
3239 3244
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3763 INIT_LIST_HEAD(&vha->vp_fcports); 3768 INIT_LIST_HEAD(&vha->vp_fcports);
3764 INIT_LIST_HEAD(&vha->work_list); 3769 INIT_LIST_HEAD(&vha->work_list);
3765 INIT_LIST_HEAD(&vha->list); 3770 INIT_LIST_HEAD(&vha->list);
3771 INIT_LIST_HEAD(&vha->qla_cmd_list);
3772 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
3766 3773
3767 spin_lock_init(&vha->work_lock); 3774 spin_lock_init(&vha->work_lock);
3775 spin_lock_init(&vha->cmd_list_lock);
3768 3776
3769 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3777 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3770 ql_dbg(ql_dbg_init, vha, 0x0041, 3778 ql_dbg(ql_dbg_init, vha, 0x0041,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..2feb5f38edcd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1697{ 1697{
1698 uint32_t led_select_value = 0; 1698 uint32_t led_select_value = 0;
1699 1699
1700 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1701 goto out; 1701 goto out;
1702 1702
1703 if (ha->port_no == 0) 1703 if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..58651ecbd88c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha); 115static void qlt_disable_vha(struct scsi_qla_host *vha);
116static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
116/* 121/*
117 * Global Variables 122 * Global Variables
118 */ 123 */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
122static DEFINE_MUTEX(qla_tgt_mutex); 127static DEFINE_MUTEX(qla_tgt_mutex);
123static LIST_HEAD(qla_tgt_glist); 128static LIST_HEAD(qla_tgt_glist);
124 129
130/* This API intentionally takes dest as a parameter, rather than returning
131 * an int value, so the caller cannot forget to issue wmb() after the store */
132void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
133{
134 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
135 *dest = atomic_inc_return(&base_vha->generation_tick);
136 /* memory barrier */
137 wmb();
138}
139
125/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ 140/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
126static struct qla_tgt_sess *qlt_find_sess_by_port_name( 141static struct qla_tgt_sess *qlt_find_sess_by_port_name(
127 struct qla_tgt *tgt, 142 struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
381 struct qla_tgt *tgt = sess->tgt; 396 struct qla_tgt *tgt = sess->tgt;
382 struct scsi_qla_host *vha = sess->vha; 397 struct scsi_qla_host *vha = sess->vha;
383 struct qla_hw_data *ha = vha->hw; 398 struct qla_hw_data *ha = vha->hw;
399 unsigned long flags;
400 bool logout_started = false;
401 fc_port_t fcport;
402
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed);
384 410
385 BUG_ON(!tgt); 411 BUG_ON(!tgt);
412
413 if (sess->logout_on_delete) {
414 int rc;
415
416 memset(&fcport, 0, sizeof(fcport));
417 fcport.loop_id = sess->loop_id;
418 fcport.d_id = sess->s_id;
419 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
420 fcport.vha = vha;
421 fcport.tgt_session = sess;
422
423 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
424 if (rc != QLA_SUCCESS)
425 ql_log(ql_log_warn, vha, 0xf085,
426 "Schedule logo failed sess %p rc %d\n",
427 sess, rc);
428 else
429 logout_started = true;
430 }
431
386 /* 432 /*
387 * Release the target session for FC Nexus from fabric module code. 433 * Release the target session for FC Nexus from fabric module code.
388 */ 434 */
389 if (sess->se_sess != NULL) 435 if (sess->se_sess != NULL)
390 ha->tgt.tgt_ops->free_session(sess); 436 ha->tgt.tgt_ops->free_session(sess);
391 437
438 if (logout_started) {
439 bool traced = false;
440
441 while (!ACCESS_ONCE(sess->logout_completed)) {
442 if (!traced) {
443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
444 "%s: waiting for sess %p logout\n",
445 __func__, sess);
446 traced = true;
447 }
448 msleep(100);
449 }
450
451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
452 "%s: sess %p logout completed\n",
453 __func__, sess);
454 }
455
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457
458 if (sess->plogi_ack_needed)
459 qlt_send_notify_ack(vha, &sess->tm_iocb,
460 0, 0, 0, 0, 0, 0);
461
462 list_del(&sess->sess_list_entry);
463
464 spin_unlock_irqrestore(&ha->hardware_lock, flags);
465
392 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 466 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
393 "Unregistration of sess %p finished\n", sess); 467 "Unregistration of sess %p finished\n", sess);
394 468
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
409 483
410 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 484 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
411 485
412 list_del(&sess->sess_list_entry); 486 if (!list_empty(&sess->del_list_entry))
413 if (sess->deleted) 487 list_del_init(&sess->del_list_entry);
414 list_del(&sess->del_list_entry); 488 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
415 489
416 INIT_WORK(&sess->free_work, qlt_free_session_done); 490 INIT_WORK(&sess->free_work, qlt_free_session_done);
417 schedule_work(&sess->free_work); 491 schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
431 505
432 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 506 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
433 if (loop_id == 0xFFFF) { 507 if (loop_id == 0xFFFF) {
434#if 0 /* FIXME: Re-enable Global event handling.. */
435 /* Global event */ 508 /* Global event */
436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
437 qlt_clear_tgt_db(ha->tgt.qla_tgt); 510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
511#if 0 /* FIXME: do we need to choose a session here? */
438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
440 typeof(*sess), sess_list_entry); 514 typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
489 struct qla_tgt *tgt = sess->tgt; 563 struct qla_tgt *tgt = sess->tgt;
490 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; 564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
491 565
492 if (sess->deleted) 566 if (sess->deleted) {
493 return; 567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
569 list_del(&sess->del_list_entry);
570 else
571 return;
572 }
494 573
495 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 574 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
496 "Scheduling sess %p for deletion\n", sess); 575 "Scheduling sess %p for deletion\n", sess);
497 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
498 sess->deleted = 1;
499 576
500 if (immediate) 577 if (immediate) {
501 dev_loss_tmo = 0; 578 dev_loss_tmo = 0;
579 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
580 list_add(&sess->del_list_entry, &tgt->del_sess_list);
581 } else {
582 sess->deleted = QLA_SESS_DELETION_PENDING;
583 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
584 }
502 585
503 sess->expires = jiffies + dev_loss_tmo * HZ; 586 sess->expires = jiffies + dev_loss_tmo * HZ;
504 587
505 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, 588 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
506 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " 589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
507 "deletion in %u secs (expires: %lu) immed: %d\n", 590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
508 sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, 591 sess->vha->vp_idx, sess->port_name, sess->loop_id,
509 sess->expires, immediate); 592 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
593 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
594 sess->generation);
510 595
511 if (immediate) 596 if (immediate)
512 schedule_delayed_work(&tgt->sess_del_work, 0); 597 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
513 else 598 else
514 schedule_delayed_work(&tgt->sess_del_work, 599 schedule_delayed_work(&tgt->sess_del_work,
515 sess->expires - jiffies); 600 sess->expires - jiffies);
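Session deletion now distinguishes a delayed, still-cancellable QLA_SESS_DELETION_PENDING state from the unconditional QLA_SESS_DELETION_IN_PROGRESS state: an immediate request upgrades a pending one, and only pending sessions can be undeleted when the initiator returns. A small sketch of those transitions, with illustrative constants and names:

#include <stdio.h>

enum sess_del_state {
	SESS_ACTIVE = 0,
	SESS_DELETION_PENDING,      /* delayed; can still be undeleted */
	SESS_DELETION_IN_PROGRESS,  /* point of no return */
};

struct sess { enum sess_del_state deleted; };

static void schedule_deletion(struct sess *s, int immediate)
{
	if (s->deleted) {
		/* only a pending (delayed) deletion can be upgraded */
		if (!(immediate && s->deleted == SESS_DELETION_PENDING))
			return;
	}
	s->deleted = immediate ? SESS_DELETION_IN_PROGRESS
			       : SESS_DELETION_PENDING;
}

static int try_undelete(struct sess *s)
{
	if (s->deleted != SESS_DELETION_PENDING)
		return 0;               /* too late to reuse this session */
	s->deleted = SESS_ACTIVE;
	return 1;
}

int main(void)
{
	struct sess s = { 0 };

	schedule_deletion(&s, 0);
	printf("undelete after delayed schedule: %d\n", try_undelete(&s)); /* 1 */

	schedule_deletion(&s, 1);
	printf("undelete after immediate schedule: %d\n", try_undelete(&s)); /* 0 */
	return 0;
}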
@@ -578,9 +663,9 @@ out_free_id_list:
578/* ha->hardware_lock supposed to be held on entry */ 663/* ha->hardware_lock supposed to be held on entry */
579static void qlt_undelete_sess(struct qla_tgt_sess *sess) 664static void qlt_undelete_sess(struct qla_tgt_sess *sess)
580{ 665{
581 BUG_ON(!sess->deleted); 666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
582 667
583 list_del(&sess->del_list_entry); 668 list_del_init(&sess->del_list_entry);
584 sess->deleted = 0; 669 sess->deleted = 0;
585} 670}
586 671
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
599 del_list_entry); 684 del_list_entry);
600 elapsed = jiffies; 685 elapsed = jiffies;
601 if (time_after_eq(elapsed, sess->expires)) { 686 if (time_after_eq(elapsed, sess->expires)) {
602 qlt_undelete_sess(sess); 687 /* No turning back */
688 list_del_init(&sess->del_list_entry);
689 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
603 690
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 691 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
605 "Timeout: sess %p about to be deleted\n", 692 "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
643 fcport->d_id.b.al_pa, fcport->d_id.b.area, 730 fcport->d_id.b.al_pa, fcport->d_id.b.area,
644 fcport->loop_id); 731 fcport->loop_id);
645 732
733 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock,
736 flags);
737 return NULL;
738 }
739
646 if (sess->deleted) 740 if (sess->deleted)
647 qlt_undelete_sess(sess); 741 qlt_undelete_sess(sess);
648 742
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
652 746
653 if (sess->local && !local) 747 if (sess->local && !local)
654 sess->local = 0; 748 sess->local = 0;
749
750 qlt_do_generation_tick(vha, &sess->generation);
751
655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656 753
657 return sess; 754 return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
673 sess->s_id = fcport->d_id; 770 sess->s_id = fcport->d_id;
674 sess->loop_id = fcport->loop_id; 771 sess->loop_id = fcport->loop_id;
675 sess->local = local; 772 sess->local = local;
773 INIT_LIST_HEAD(&sess->del_list_entry);
774
775 /* Under normal circumstances we want to log out from the firmware when
776 * the session eventually ends and release the corresponding nport handle.
777 * In exceptional cases (e.g. when a new PLOGI is waiting) the corresponding
778 * code will adjust these flags as necessary. */
779 sess->logout_on_delete = 1;
780 sess->keep_nport_handle = 0;
676 781
677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 782 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
678 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 783 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
705 spin_lock_irqsave(&ha->hardware_lock, flags); 810 spin_lock_irqsave(&ha->hardware_lock, flags);
706 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
707 vha->vha_tgt.qla_tgt->sess_count++; 812 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation);
708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709 815
710 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
718} 824}
719 825
720/* 826/*
721 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 827 * Called from qla2x00_reg_remote_port()
722 */ 828 */
723void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 829void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
724{ 830{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
750 mutex_unlock(&vha->vha_tgt.tgt_mutex); 856 mutex_unlock(&vha->vha_tgt.tgt_mutex);
751 857
752 spin_lock_irqsave(&ha->hardware_lock, flags); 858 spin_lock_irqsave(&ha->hardware_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return;
753 } else { 863 } else {
754 kref_get(&sess->se_sess->sess_kref); 864 kref_get(&sess->se_sess->sess_kref);
755 865
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
780 spin_unlock_irqrestore(&ha->hardware_lock, flags); 890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
781} 891}
782 892
783void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 893/*
894 * max_gen - specifies maximum session generation
895 * at which this deletion request is still valid
896 */
897void
898qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
784{ 899{
785 struct qla_hw_data *ha = vha->hw;
786 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
787 struct qla_tgt_sess *sess; 901 struct qla_tgt_sess *sess;
788 unsigned long flags;
789 902
790 if (!vha->hw->tgt.tgt_ops) 903 if (!vha->hw->tgt.tgt_ops)
791 return; 904 return;
792 905
793 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 906 if (!tgt)
794 return; 907 return;
795 908
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 if (tgt->tgt_stop) { 909 if (tgt->tgt_stop) {
798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
799 return; 910 return;
800 } 911 }
801 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
802 if (!sess) { 913 if (!sess) {
803 spin_unlock_irqrestore(&ha->hardware_lock, flags); 914 return;
915 }
916
917 if (max_gen - sess->generation < 0) {
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n",
921 sess->se_sess, sess, sess->port_name, max_gen,
922 sess->generation);
804 return; 923 return;
805 } 924 }
806 925
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
808 927
809 sess->local = 1; 928 sess->local = 1;
810 qlt_schedule_sess_for_deletion(sess, false); 929 qlt_schedule_sess_for_deletion(sess, false);
811 spin_unlock_irqrestore(&ha->hardware_lock, flags);
812} 930}
813 931
814static inline int test_tgt_sess_count(struct qla_tgt *tgt) 932static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1175 FCP_TMF_CMPL, true); 1293 FCP_TMF_CMPL, true);
1176} 1294}
1177 1295
1296static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1297{
1298 struct qla_tgt_sess_op *op;
1299 struct qla_tgt_cmd *cmd;
1300
1301 spin_lock(&vha->cmd_list_lock);
1302
1303 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1304 if (tag == op->atio.u.isp24.exchange_addr) {
1305 op->aborted = true;
1306 spin_unlock(&vha->cmd_list_lock);
1307 return 1;
1308 }
1309 }
1310
1311 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1312 if (tag == cmd->atio.u.isp24.exchange_addr) {
1313 cmd->state = QLA_TGT_STATE_ABORTED;
1314 spin_unlock(&vha->cmd_list_lock);
1315 return 1;
1316 }
1317 }
1318
1319 spin_unlock(&vha->cmd_list_lock);
1320 return 0;
1321}
1322
1323/* drop cmds for the given lun
1324 * XXX only looks for cmds on the port through which the lun reset was received
1325 * XXX does not go through the lists of other ports (which may have cmds
1326 * for the same lun)
1327 */
1328static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1329 uint32_t lun, uint8_t *s_id)
1330{
1331 struct qla_tgt_sess_op *op;
1332 struct qla_tgt_cmd *cmd;
1333 uint32_t key;
1334
1335 key = sid_to_key(s_id);
1336 spin_lock(&vha->cmd_list_lock);
1337 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1338 uint32_t op_key;
1339 uint32_t op_lun;
1340
1341 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1342 op_lun = scsilun_to_int(
1343 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1344 if (op_key == key && op_lun == lun)
1345 op->aborted = true;
1346 }
1347 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1348 uint32_t cmd_key;
1349 uint32_t cmd_lun;
1350
1351 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1352 cmd_lun = scsilun_to_int(
1353 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1354 if (cmd_key == key && cmd_lun == lun)
1355 cmd->state = QLA_TGT_STATE_ABORTED;
1356 }
1357 spin_unlock(&vha->cmd_list_lock);
1358}
1359
1178/* ha->hardware_lock supposed to be held on entry */ 1360/* ha->hardware_lock supposed to be held on entry */
1179static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1361static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1180 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) 1362 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
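The new helpers walk the per-vha command lists and match commands either by exchange tag or by (source FC address, LUN) pair, with sid_to_key() packing the 3-byte S_ID into a single integer so the comparison is one operation. A standalone sketch of that packing and matching (names and byte order are illustrative):

#include <stdint.h>
#include <stdio.h>

/* pack a 3-byte FC source address into a single comparable key */
static uint32_t sid_to_key(const uint8_t s_id[3])
{
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8)  |
	        (uint32_t)s_id[2];
}

struct cmd { uint8_t s_id[3]; uint32_t lun; int aborted; };

static void abort_cmds_for_lun(struct cmd *cmds, int n,
			       uint32_t lun, const uint8_t s_id[3])
{
	uint32_t key = sid_to_key(s_id);

	for (int i = 0; i < n; i++)
		if (sid_to_key(cmds[i].s_id) == key && cmds[i].lun == lun)
			cmds[i].aborted = 1;
}

int main(void)
{
	struct cmd cmds[] = {
		{ { 0x01, 0x02, 0x03 }, 0, 0 },
		{ { 0x01, 0x02, 0x03 }, 1, 0 },
		{ { 0x0a, 0x0b, 0x0c }, 0, 0 },
	};
	uint8_t initiator[3] = { 0x01, 0x02, 0x03 };

	abort_cmds_for_lun(cmds, 3, 0, initiator);
	for (int i = 0; i < 3; i++)
		printf("cmd %d aborted=%d\n", i, cmds[i].aborted);  /* 1 0 0 */
	return 0;
}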
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1199 } 1381 }
1200 spin_unlock(&se_sess->sess_cmd_lock); 1382 spin_unlock(&se_sess->sess_cmd_lock);
1201 1383
1202 if (!found_lun) 1384 /* cmd not in LIO lists, look in qla list */
1203 return -ENOENT; 1385 if (!found_lun) {
1386 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1387 /* send TASK_ABORT response immediately */
1388 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1389 return 0;
1390 } else {
1391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1392 "unable to find cmd in driver or LIO for tag 0x%x\n",
1393 abts->exchange_addr_to_abort);
1394 return -ENOENT;
1395 }
1396 }
1204 1397
1205 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1206 "qla_target(%d): task abort (tag=%d)\n", 1399 "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1284 return; 1477 return;
1285 } 1478 }
1286 1479
1480 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1481 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1482 return;
1483 }
1484
1287 rc = __qlt_24xx_handle_abts(vha, abts, sess); 1485 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1288 if (rc != 0) { 1486 if (rc != 0) {
1289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 1487 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1726 struct qla_hw_data *ha = vha->hw; 1924 struct qla_hw_data *ha = vha->hw;
1727 struct se_cmd *se_cmd = &cmd->se_cmd; 1925 struct se_cmd *se_cmd = &cmd->se_cmd;
1728 1926
1729 if (unlikely(cmd->aborted)) {
1730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1731 "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
1732 vha->vp_idx, cmd, se_cmd, se_cmd->tag);
1733
1734 cmd->state = QLA_TGT_STATE_ABORTED;
1735 cmd->cmd_flags |= BIT_6;
1736
1737 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1738
1739 /* !! At this point cmd could be already freed !! */
1740 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1741 }
1742
1743 prm->cmd = cmd; 1927 prm->cmd = cmd;
1744 prm->tgt = tgt; 1928 prm->tgt = tgt;
1745 prm->rq_result = scsi_status; 1929 prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2301 unsigned long flags = 0; 2485 unsigned long flags = 0;
2302 int res; 2486 int res;
2303 2487
2488 spin_lock_irqsave(&ha->hardware_lock, flags);
2489 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2490 cmd->state = QLA_TGT_STATE_PROCESSED;
2491 if (cmd->sess->logout_completed)
2492 /* no need to terminate. FW already freed exchange. */
2493 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2494 else
2495 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2497 return 0;
2498 }
2499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2500
2304 memset(&prm, 0, sizeof(prm)); 2501 memset(&prm, 0, sizeof(prm));
2305 qlt_check_srr_debug(cmd, &xmit_type); 2502 qlt_check_srr_debug(cmd, &xmit_type);
2306 2503
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2313 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2510 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2314 &full_req_cnt); 2511 &full_req_cnt);
2315 if (unlikely(res != 0)) { 2512 if (unlikely(res != 0)) {
2316 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2317 return 0;
2318
2319 return res; 2513 return res;
2320 } 2514 }
2321 2515
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2345 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2539 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2346 else 2540 else
2347 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2541 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2348 if (unlikely(res != 0)) 2542 if (unlikely(res != 0)) {
2543 vha->req->cnt += full_req_cnt;
2349 goto out_unmap_unlock; 2544 goto out_unmap_unlock;
2350 2545 }
2351 2546
2352 pkt = (struct ctio7_to_24xx *)prm.pkt; 2547 pkt = (struct ctio7_to_24xx *)prm.pkt;
2353 2548
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2461 2656
2462 spin_lock_irqsave(&ha->hardware_lock, flags); 2657 spin_lock_irqsave(&ha->hardware_lock, flags);
2463 2658
2464 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { 2659 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2660 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2465 /* 2661 /*
2466 * Either a chip reset is active or this request was from 2662 * Either a chip reset is active or this request was from
2467 * previous life, just abort the processing. 2663 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2485 else 2681 else
2486 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2682 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2487 2683
2488 if (unlikely(res != 0)) 2684 if (unlikely(res != 0)) {
2685 vha->req->cnt += prm.req_cnt;
2489 goto out_unlock_free_unmap; 2686 goto out_unlock_free_unmap;
2687 }
2688
2490 pkt = (struct ctio7_to_24xx *)prm.pkt; 2689 pkt = (struct ctio7_to_24xx *)prm.pkt;
2491 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2690 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2492 CTIO7_FLAGS_STATUS_MODE_0); 2691 CTIO7_FLAGS_STATUS_MODE_0);
@@ -2651,6 +2850,89 @@ out:
2651 2850
2652/* If hardware_lock held on entry, might drop it, then reacquire */ 2851/* If hardware_lock held on entry, might drop it, then reacquire */
2653/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2852/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2853static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2854 struct imm_ntfy_from_isp *ntfy)
2855{
2856 struct nack_to_isp *nack;
2857 struct qla_hw_data *ha = vha->hw;
2858 request_t *pkt;
2859 int ret = 0;
2860
2861 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2862 "Sending TERM ELS CTIO (ha=%p)\n", ha);
2863
2864 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2865 if (pkt == NULL) {
2866 ql_dbg(ql_dbg_tgt, vha, 0xe080,
2867 "qla_target(%d): %s failed: unable to allocate "
2868 "request packet\n", vha->vp_idx, __func__);
2869 return -ENOMEM;
2870 }
2871
2872 pkt->entry_type = NOTIFY_ACK_TYPE;
2873 pkt->entry_count = 1;
2874 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2875
2876 nack = (struct nack_to_isp *)pkt;
2877 nack->ox_id = ntfy->ox_id;
2878
2879 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2880 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2881 nack->u.isp24.flags = ntfy->u.isp24.flags &
2882 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2883 }
2884
2885 /* terminate */
2886 nack->u.isp24.flags |=
2887 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2888
2889 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2890 nack->u.isp24.status = ntfy->u.isp24.status;
2891 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2892 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2893 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2894 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2895 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2896 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2897
2898 qla2x00_start_iocbs(vha, vha->req);
2899 return ret;
2900}
2901
2902static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2903 struct imm_ntfy_from_isp *imm, int ha_locked)
2904{
2905 unsigned long flags = 0;
2906 int rc;
2907
2908 if (qlt_issue_marker(vha, ha_locked) < 0)
2909 return;
2910
2911 if (ha_locked) {
2912 rc = __qlt_send_term_imm_notif(vha, imm);
2913
2914#if 0 /* Todo */
2915 if (rc == -ENOMEM)
2916 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2917#endif
2918 goto done;
2919 }
2920
2921 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2922 rc = __qlt_send_term_imm_notif(vha, imm);
2923
2924#if 0 /* Todo */
2925 if (rc == -ENOMEM)
2926 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2927#endif
2928
2929done:
2930 if (!ha_locked)
2931 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2932}
2933
2934/* If hardware_lock held on entry, might drop it, then reacquire */
2935/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2654static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2936static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2655 struct qla_tgt_cmd *cmd, 2937 struct qla_tgt_cmd *cmd,
2656 struct atio_from_isp *atio) 2938 struct atio_from_isp *atio)
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2715static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2997static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2716 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2998 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2717{ 2999{
2718 unsigned long flags; 3000 unsigned long flags = 0;
2719 int rc; 3001 int rc;
2720 3002
2721 if (qlt_issue_marker(vha, ha_locked) < 0) 3003 if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2731 rc = __qlt_send_term_exchange(vha, cmd, atio); 3013 rc = __qlt_send_term_exchange(vha, cmd, atio);
2732 if (rc == -ENOMEM) 3014 if (rc == -ENOMEM)
2733 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3015 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2734 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2735 3016
2736done: 3017done:
2737 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || 3018 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2738 !cmd->cmd_sent_to_fw)) { 3019 !cmd->cmd_sent_to_fw)) {
2739 if (!ha_locked && !in_interrupt()) 3020 if (cmd->sg_mapped)
2740 msleep(250); /* just in case */ 3021 qlt_unmap_sg(vha, cmd);
2741
2742 qlt_unmap_sg(vha, cmd);
2743 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3022 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2744 } 3023 }
3024
3025 if (!ha_locked)
3026 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3027
2745 return; 3028 return;
2746} 3029}
2747 3030
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2792 3075
2793} 3076}
2794 3077
3078void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3079{
3080 struct qla_tgt *tgt = cmd->tgt;
3081 struct scsi_qla_host *vha = tgt->vha;
3082 struct se_cmd *se_cmd = &cmd->se_cmd;
3083
3084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3085 "qla_target(%d): terminating exchange for aborted cmd=%p "
3086 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3087 se_cmd->tag);
3088
3089 cmd->state = QLA_TGT_STATE_ABORTED;
3090 cmd->cmd_flags |= BIT_6;
3091
3092 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
3093}
3094EXPORT_SYMBOL(qlt_abort_cmd);
3095
2795void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3096void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2796{ 3097{
2797 struct qla_tgt_sess *sess = cmd->sess; 3098 struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3015 dump_stack(); 3316 dump_stack();
3016 } 3317 }
3017 3318
3018 cmd->cmd_flags |= BIT_12; 3319 cmd->cmd_flags |= BIT_17;
3019 ha->tgt.tgt_ops->free_cmd(cmd); 3320 ha->tgt.tgt_ops->free_cmd(cmd);
3020} 3321}
3021 3322
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3177skip_term: 3478skip_term:
3178 3479
3179 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3480 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3180 ; 3481 cmd->cmd_flags |= BIT_12;
3181 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3482 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3182 int rx_status = 0; 3483 int rx_status = 0;
3183 3484
@@ -3191,9 +3492,11 @@ skip_term:
3191 ha->tgt.tgt_ops->handle_data(cmd); 3492 ha->tgt.tgt_ops->handle_data(cmd);
3192 return; 3493 return;
3193 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3494 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3495 cmd->cmd_flags |= BIT_18;
3194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3195 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3497 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3196 } else { 3498 } else {
3499 cmd->cmd_flags |= BIT_19;
3197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3198 "qla_target(%d): A command in state (%d) should " 3501 "qla_target(%d): A command in state (%d) should "
3199 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3502 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
3205 dump_stack(); 3508 dump_stack();
3206 } 3509 }
3207 3510
3208
3209 ha->tgt.tgt_ops->free_cmd(cmd); 3511 ha->tgt.tgt_ops->free_cmd(cmd);
3210} 3512}
3211 3513
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3263 if (tgt->tgt_stop) 3565 if (tgt->tgt_stop)
3264 goto out_term; 3566 goto out_term;
3265 3567
3568 if (cmd->state == QLA_TGT_STATE_ABORTED) {
3569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3570 "cmd with tag %u is aborted\n",
3571 cmd->atio.u.isp24.exchange_addr);
3572 goto out_term;
3573 }
3574
3266 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3575 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3267 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3576 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3268 cmd->unpacked_lun = scsilun_to_int( 3577 cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
3316static void qlt_do_work(struct work_struct *work) 3625static void qlt_do_work(struct work_struct *work)
3317{ 3626{
3318 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 3627 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3628 scsi_qla_host_t *vha = cmd->vha;
3629 unsigned long flags;
3630
3631 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3632 list_del(&cmd->cmd_list);
3633 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3319 3634
3320 __qlt_do_work(cmd); 3635 __qlt_do_work(cmd);
3321} 3636}
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3345 cmd->loop_id = sess->loop_id; 3660 cmd->loop_id = sess->loop_id;
3346 cmd->conf_compl_supported = sess->conf_compl_supported; 3661 cmd->conf_compl_supported = sess->conf_compl_supported;
3347 3662
3663 cmd->cmd_flags = 0;
3664 cmd->jiffies_at_alloc = get_jiffies_64();
3665
3666 cmd->reset_count = vha->hw->chip_reset;
3667
3348 return cmd; 3668 return cmd;
3349} 3669}
3350 3670
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3362 unsigned long flags; 3682 unsigned long flags;
3363 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 3683 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3364 3684
3685 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3686 list_del(&op->cmd_list);
3687 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3688
3689 if (op->aborted) {
3690 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3691 "sess_op with tag %u is aborted\n",
3692 op->atio.u.isp24.exchange_addr);
3693 goto out_term;
3694 }
3695
3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 3696 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3366 "qla_target(%d): Unable to find wwn login" 3697 "qla_target(%d): Unable to find wwn login"
3367 " (s_id %x:%x:%x), trying to create it manually\n", 3698 " (s_id %x:%x:%x), trying to create it manually\n",
3368 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 3699 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3369 3700
3370 if (op->atio.u.raw.entry_count > 1) { 3701 if (op->atio.u.raw.entry_count > 1) {
3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 3702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3372 "Dropping multi entry atio %p\n", &op->atio); 3703 "Dropping multi entry atio %p\n", &op->atio);
3373 goto out_term; 3704 goto out_term;
3374 } 3705 }
3375 3706
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3434 3765
3435 memcpy(&op->atio, atio, sizeof(*atio)); 3766 memcpy(&op->atio, atio, sizeof(*atio));
3436 op->vha = vha; 3767 op->vha = vha;
3768
3769 spin_lock(&vha->cmd_list_lock);
3770 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3771 spin_unlock(&vha->cmd_list_lock);
3772
3437 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3773 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3438 queue_work(qla_tgt_wq, &op->work); 3774 queue_work(qla_tgt_wq, &op->work);
3439 return 0; 3775 return 0;
3440 } 3776 }
3777
3778 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3779 * session deletion, but it's still in sess_del_work wq */
3780 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3781 ql_dbg(ql_dbg_io, vha, 0x3061,
3782 "New command while old session %p is being deleted\n",
3783 sess);
3784 return -EFAULT;
3785 }
3786
3441 /* 3787 /*
3442 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3788 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3443 */ 3789 */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3451 return -ENOMEM; 3797 return -ENOMEM;
3452 } 3798 }
3453 3799
3454 cmd->cmd_flags = 0;
3455 cmd->jiffies_at_alloc = get_jiffies_64();
3456
3457 cmd->reset_count = vha->hw->chip_reset;
3458
3459 cmd->cmd_in_wq = 1; 3800 cmd->cmd_in_wq = 1;
3460 cmd->cmd_flags |= BIT_0; 3801 cmd->cmd_flags |= BIT_0;
3802
3803 spin_lock(&vha->cmd_list_lock);
3804 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3805 spin_unlock(&vha->cmd_list_lock);
3806
3461 INIT_WORK(&cmd->work, qlt_do_work); 3807 INIT_WORK(&cmd->work, qlt_do_work);
3462 queue_work(qla_tgt_wq, &cmd->work); 3808 queue_work(qla_tgt_wq, &cmd->work);
3463 return 0; 3809 return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3471 struct scsi_qla_host *vha = sess->vha; 3817 struct scsi_qla_host *vha = sess->vha;
3472 struct qla_hw_data *ha = vha->hw; 3818 struct qla_hw_data *ha = vha->hw;
3473 struct qla_tgt_mgmt_cmd *mcmd; 3819 struct qla_tgt_mgmt_cmd *mcmd;
3820 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3474 int res; 3821 int res;
3475 uint8_t tmr_func; 3822 uint8_t tmr_func;
3476 3823
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3511 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3858 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3512 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3859 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3513 tmr_func = TMR_LUN_RESET; 3860 tmr_func = TMR_LUN_RESET;
3861 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
3514 break; 3862 break;
3515 3863
3516 case QLA_TGT_CLEAR_TS: 3864 case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3599 sizeof(struct atio_from_isp)); 3947 sizeof(struct atio_from_isp));
3600 } 3948 }
3601 3949
3950 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
3951 return -EFAULT;
3952
3602 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 3953 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3603} 3954}
3604 3955
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
3664 return __qlt_abort_task(vha, iocb, sess); 4015 return __qlt_abort_task(vha, iocb, sess);
3665} 4016}
3666 4017
4018void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4019{
4020 if (fcport->tgt_session) {
4021 if (rc != MBS_COMMAND_COMPLETE) {
4022 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4023 "%s: se_sess %p / sess %p from"
4024 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4025 " LOGO failed: %#x\n",
4026 __func__,
4027 fcport->tgt_session->se_sess,
4028 fcport->tgt_session,
4029 fcport->port_name, fcport->loop_id,
4030 fcport->d_id.b.domain, fcport->d_id.b.area,
4031 fcport->d_id.b.al_pa, rc);
4032 }
4033
4034 fcport->tgt_session->logout_completed = 1;
4035 }
4036}
4037
4038static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4039 struct imm_ntfy_from_isp *b)
4040{
4041 struct imm_ntfy_from_isp tmp;
4042 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4043 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4044 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4045}
4046
4047/*
4048 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4049 *
4050 * Schedules sessions with matching port_id/loop_id but different wwn for
4051 * deletion. Returns existing session with matching wwn if present.
4052 * Null otherwise.
4053 */
4054static struct qla_tgt_sess *
4055qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4056 port_id_t port_id, uint16_t loop_id)
4057{
4058 struct qla_tgt_sess *sess = NULL, *other_sess;
4059 uint64_t other_wwn;
4060
4061 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4062
4063 other_wwn = wwn_to_u64(other_sess->port_name);
4064
4065 if (wwn == other_wwn) {
4066 WARN_ON(sess);
4067 sess = other_sess;
4068 continue;
4069 }
4070
4071 /* find other sess with nport_id collision */
4072 if (port_id.b24 == other_sess->s_id.b24) {
4073 if (loop_id != other_sess->loop_id) {
4074 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4075 "Invalidating sess %p loop_id %d wwn %llx.\n",
4076 other_sess, other_sess->loop_id, other_wwn);
4077
4078 /*
4079 * logout_on_delete is set by default, but another
4080 * session that has the same s_id/loop_id combo
4082 * might have cleared it when it requested this session's
4083 * deletion, so don't touch it
4083 */
4084 qlt_schedule_sess_for_deletion(other_sess, true);
4085 } else {
4086 /*
4087 * Another wwn used to have our s_id/loop_id
4088 * combo - kill the session, but don't log out
4089 */
4090 sess->logout_on_delete = 0;
4091 qlt_schedule_sess_for_deletion(other_sess,
4092 true);
4093 }
4094 continue;
4095 }
4096
4097 /* find other sess with nport handle collision */
4098 if (loop_id == other_sess->loop_id) {
4099 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4100 "Invalidating sess %p loop_id %d wwn %llx.\n",
4101 other_sess, other_sess->loop_id, other_wwn);
4102
4103 /* Same loop_id but different s_id
4104 * Ok to kill and logout */
4105 qlt_schedule_sess_for_deletion(other_sess, true);
4106 }
4107 }
4108
4109 return sess;
4110}
4111
4112/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4113static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4114{
4115 struct qla_tgt_sess_op *op;
4116 struct qla_tgt_cmd *cmd;
4117 uint32_t key;
4118 int count = 0;
4119
4120 key = (((u32)s_id->b.domain << 16) |
4121 ((u32)s_id->b.area << 8) |
4122 ((u32)s_id->b.al_pa));
4123
4124 spin_lock(&vha->cmd_list_lock);
4125 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4126 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4127 if (op_key == key) {
4128 op->aborted = true;
4129 count++;
4130 }
4131 }
4132 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4133 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4134 if (cmd_key == key) {
4135 cmd->state = QLA_TGT_STATE_ABORTED;
4136 count++;
4137 }
4138 }
4139 spin_unlock(&vha->cmd_list_lock);
4140
4141 return count;
4142}
4143
3667/* 4144/*
3668 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire 4145 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3669 */ 4146 */
3670static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4147static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3671 struct imm_ntfy_from_isp *iocb) 4148 struct imm_ntfy_from_isp *iocb)
3672{ 4149{
4150 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4151 struct qla_hw_data *ha = vha->hw;
4152 struct qla_tgt_sess *sess = NULL;
4153 uint64_t wwn;
4154 port_id_t port_id;
4155 uint16_t loop_id;
4156 uint16_t wd3_lo;
3673 int res = 0; 4157 int res = 0;
3674 4158
4159 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4160
4161 port_id.b.domain = iocb->u.isp24.port_id[2];
4162 port_id.b.area = iocb->u.isp24.port_id[1];
4163 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4164 port_id.b.rsvd_1 = 0;
4165
4166 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4167
3675 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 4168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3676 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 4169 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3677 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 4170 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3678 4171
4172 /* res = 1 means ack at the end of this thread
4173 * res = 0 means ack async/later.
4174 */
3679 switch (iocb->u.isp24.status_subcode) { 4175 switch (iocb->u.isp24.status_subcode) {
3680 case ELS_PLOGI: 4176 case ELS_PLOGI:
3681 case ELS_FLOGI: 4177
4178 /* Mark all stale commands in qla_tgt_wq for deletion */
4179 abort_cmds_for_s_id(vha, &port_id);
4180
4181 if (wwn)
4182 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4183 port_id, loop_id);
4184
4185 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
4186 res = 1;
4187 break;
4188 }
4189
4190 if (sess->plogi_ack_needed) {
4191 /*
4192 * Initiator sent another PLOGI before last PLOGI could
4193 * finish. Swap the PLOGI IOCBs and terminate the old one
4194 * without acking; the new one will get acked when session
4195 * deletion completes.
4196 */
4197 ql_log(ql_log_warn, sess->vha, 0xf094,
4198 "sess %p received double plogi.\n", sess);
4199
4200 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4201
4202 qlt_send_term_imm_notif(vha, iocb, 1);
4203
4204 res = 0;
4205 break;
4206 }
4207
4208 res = 0;
4209
4210 /*
4211 * Save immediate Notif IOCB for Ack when sess is done
4212 * and being deleted.
4213 */
4214 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
4215 sess->plogi_ack_needed = 1;
4216
4217 /*
4218 * Under normal circumstances we want to release nport handle
4219 * during LOGO process to avoid nport handle leaks inside FW.
4220 * The exception is when LOGO is done while another PLOGI with
4221 * the same nport handle is waiting as might be the case here.
4222 * Note: there is always a possibility of a race where session
4223 * deletion has already started for other reasons (e.g. ACL
4224 * removal) and now PLOGI arrives:
4225 * 1. if PLOGI arrived in FW after nport handle has been freed,
4226 * FW must have assigned this PLOGI a new/same handle and we
4227 * can proceed ACK'ing it as usual when session deletion
4228 * completes.
4229 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4230 * bit reached it, the handle has now been released. We'll
4231 * get an error when we ACK this PLOGI. Nothing will be sent
4232 * back to initiator. Initiator should eventually retry
4233 * PLOGI and situation will correct itself.
4234 */
4235 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4236 (sess->s_id.b24 == port_id.b24));
4237 qlt_schedule_sess_for_deletion(sess, true);
4238 break;
4239
3682 case ELS_PRLI: 4240 case ELS_PRLI:
4241 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4242
4243 if (wwn)
4244 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4245 loop_id);
4246
4247 if (sess != NULL) {
4248 if (sess->deleted) {
4249 /*
4250 * Impatient initiator sent PRLI before last
4251 * PLOGI could finish. Force the initiator to retry
4252 * while the previous one finishes.
4253 */
4254 ql_log(ql_log_warn, sess->vha, 0xf095,
4255 "sess %p PRLI received, before plogi ack.\n",
4256 sess);
4257 qlt_send_term_imm_notif(vha, iocb, 1);
4258 res = 0;
4259 break;
4260 }
4261
4262 /*
4263 * This shouldn't happen under normal circumstances,
4264 * since we have deleted the old session during PLOGI
4265 */
4266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4267 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4268 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4269
4270 sess->local = 0;
4271 sess->loop_id = loop_id;
4272 sess->s_id = port_id;
4273
4274 if (wd3_lo & BIT_7)
4275 sess->conf_compl_supported = 1;
4276
4277 }
4278 res = 1; /* send notify ack */
4279
4280 /* Make session global (not used in fabric mode) */
4281 if (ha->current_topology != ISP_CFG_F) {
4282 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4283 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4284 qla2xxx_wake_dpc(vha);
4285 } else {
4286 /* todo: else - create sess here. */
4287 res = 1; /* send notify ack */
4288 }
4289
4290 break;
4291
3683 case ELS_LOGO: 4292 case ELS_LOGO:
3684 case ELS_PRLO: 4293 case ELS_PRLO:
3685 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4294 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3697 break; 4306 break;
3698 } 4307 }
3699 4308
4309 case ELS_FLOGI: /* should never happen */
3700 default: 4310 default:
3701 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4311 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3702 "qla_target(%d): Unsupported ELS command %x " 4312 "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5012 if (!sess) 5622 if (!sess)
5013 goto out_term; 5623 goto out_term;
5014 } else { 5624 } else {
5625 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5626 sess = NULL;
5627 goto out_term;
5628 }
5629
5015 kref_get(&sess->se_sess->sess_kref); 5630 kref_get(&sess->se_sess->sess_kref);
5016 } 5631 }
5017 5632
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5066 if (!sess) 5681 if (!sess)
5067 goto out_term; 5682 goto out_term;
5068 } else { 5683 } else {
5684 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5685 sess = NULL;
5686 goto out_term;
5687 }
5688
5069 kref_get(&sess->se_sess->sess_kref); 5689 kref_get(&sess->se_sess->sess_kref);
5070 } 5690 }
5071 5691
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5552 6172
5553 /* Adjust ring index */ 6173 /* Adjust ring index */
5554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6174 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6175 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
5555} 6176}
5556 6177
5557void 6178void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5793 if (!QLA_TGT_MODE_ENABLED()) 6414 if (!QLA_TGT_MODE_ENABLED())
5794 return; 6415 return;
5795 6416
5796 if (ha->mqenable || IS_QLA83XX(ha)) { 6417 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
5797 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6418 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5798 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6419 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5799 } else { 6420 } else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
167 uint32_t srr_rel_offs; 167 uint32_t srr_rel_offs;
168 uint16_t srr_ui; 168 uint16_t srr_ui;
169 uint16_t srr_ox_id; 169 uint16_t srr_ox_id;
170 uint8_t reserved_4[19]; 170 union {
171 struct {
172 uint8_t node_name[8];
173 } plogi; /* PLOGI/ADISC/PDISC */
174 struct {
175 /* PRLI word 3 bit 0-15 */
176 uint16_t wd3_lo;
177 uint8_t resv0[6];
178 } prli;
179 struct {
180 uint8_t port_id[3];
181 uint8_t resv1;
182 uint16_t nport_handle;
183 uint16_t resv2;
184 } req_els;
185 } u;
186 uint8_t port_name[8];
187 uint8_t resv3[3];
171 uint8_t vp_index; 188 uint8_t vp_index;
172 uint32_t reserved_5; 189 uint32_t reserved_5;
173 uint8_t port_id[3]; 190 uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
234 uint8_t reserved[2]; 251 uint8_t reserved[2];
235 uint16_t ox_id; 252 uint16_t ox_id;
236} __packed; 253} __packed;
254#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
237#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 255#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
238#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 256#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
239 257
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
790#define FC_TM_REJECT 4 808#define FC_TM_REJECT 4
791#define FC_TM_FAILED 5 809#define FC_TM_FAILED 5
792 810
793/*
794 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
795 * terminated, so no more actions is needed and success should be returned
796 * to target.
797 */
798#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
799
800#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) 811#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
801#define pci_dma_lo32(a) (a & 0xffffffff) 812#define pci_dma_lo32(a) (a & 0xffffffff)
802#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) 813#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
874 struct scsi_qla_host *vha; 885 struct scsi_qla_host *vha;
875 struct atio_from_isp atio; 886 struct atio_from_isp atio;
876 struct work_struct work; 887 struct work_struct work;
888 struct list_head cmd_list;
889 bool aborted;
890};
891
892enum qla_sess_deletion {
893 QLA_SESS_DELETION_NONE = 0,
894 QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
895 * this one */
896 QLA_SESS_DELETION_IN_PROGRESS = 2,
877}; 897};
878 898
879/* 899/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
884 port_id_t s_id; 904 port_id_t s_id;
885 905
886 unsigned int conf_compl_supported:1; 906 unsigned int conf_compl_supported:1;
887 unsigned int deleted:1; 907 unsigned int deleted:2;
888 unsigned int local:1; 908 unsigned int local:1;
909 unsigned int logout_on_delete:1;
910 unsigned int plogi_ack_needed:1;
911 unsigned int keep_nport_handle:1;
912
913 unsigned char logout_completed;
914
915 int generation;
889 916
890 struct se_session *se_sess; 917 struct se_session *se_sess;
891 struct scsi_qla_host *vha; 918 struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
897 924
898 uint8_t port_name[WWN_SIZE]; 925 uint8_t port_name[WWN_SIZE];
899 struct work_struct free_work; 926 struct work_struct free_work;
927
928 union {
929 struct imm_ntfy_from_isp tm_iocb;
930 };
900}; 931};
901 932
902struct qla_tgt_cmd { 933struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
912 unsigned int conf_compl_supported:1; 943 unsigned int conf_compl_supported:1;
913 unsigned int sg_mapped:1; 944 unsigned int sg_mapped:1;
914 unsigned int free_sg:1; 945 unsigned int free_sg:1;
915 unsigned int aborted:1; /* Needed in case of SRR */
916 unsigned int write_data_transferred:1; 946 unsigned int write_data_transferred:1;
917 unsigned int ctx_dsd_alloced:1; 947 unsigned int ctx_dsd_alloced:1;
918 unsigned int q_full:1; 948 unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
961 * BIT_14 - Back end data received/sent. 991 * BIT_14 - Back end data received/sent.
962 * BIT_15 - SRR prepare ctio 992 * BIT_15 - SRR prepare ctio
963 * BIT_16 - complete free 993 * BIT_16 - complete free
994 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
995 * BIT_18 - completion w/abort status
996 * BIT_19 - completion w/unknown status
964 */ 997 */
965 uint32_t cmd_flags; 998 uint32_t cmd_flags;
966}; 999};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
1026 struct qla_tgt_cmd *cmd; 1059 struct qla_tgt_cmd *cmd;
1027}; 1060};
1028 1061
1062/* Check for Switch reserved address */
1063#define IS_SW_RESV_ADDR(_s_id) \
1064 ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
1065
1029#define QLA_TGT_XMIT_DATA 1 1066#define QLA_TGT_XMIT_DATA 1
1030#define QLA_TGT_XMIT_STATUS 2 1067#define QLA_TGT_XMIT_STATUS 2
1031#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) 1068#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
1043extern void qlt_lport_deregister(struct scsi_qla_host *); 1080extern void qlt_lport_deregister(struct scsi_qla_host *);
1044extern void qlt_unreg_sess(struct qla_tgt_sess *); 1081extern void qlt_unreg_sess(struct qla_tgt_sess *);
1045extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1082extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1046extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1083extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
1047extern int __init qlt_init(void); 1084extern int __init qlt_init(void);
1048extern void qlt_exit(void); 1085extern void qlt_exit(void);
1049extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1086extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1073 ha->host->active_mode |= MODE_INITIATOR; 1110 ha->host->active_mode |= MODE_INITIATOR;
1074} 1111}
1075 1112
1113static inline uint32_t sid_to_key(const uint8_t *s_id)
1114{
1115 uint32_t key;
1116
1117 key = (((unsigned long)s_id[0] << 16) |
1118 ((unsigned long)s_id[1] << 8) |
1119 (unsigned long)s_id[2]);
1120 return key;
1121}
1122
1076/* 1123/*
1077 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1124 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1078 */ 1125 */
1079extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1126extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1080extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1127extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1081extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1128extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1129extern void qlt_abort_cmd(struct qla_tgt_cmd *);
1082extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1130extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1083extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1131extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1084extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1132extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
1109extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1157extern irqreturn_t qla83xx_msix_atio_q(int, void *);
1110extern void qlt_83xx_iospace_config(struct qla_hw_data *); 1158extern void qlt_83xx_iospace_config(struct qla_hw_data *);
1111extern int qlt_free_qfull_cmds(struct scsi_qla_host *); 1159extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1160extern void qlt_logo_completion_handler(fc_port_t *, int);
1161extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
1112 1162
1113#endif /* __QLA_TARGET_H */ 1163#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..9224a06646e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 374{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 375 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 376 struct qla_tgt_cmd, se_cmd);
377 377 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 378 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
380 380
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3000); 408 3 * HZ);
409 return 0; 409 return 0;
410 } 410 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
541 cmd->cmd_flags |= BIT_4; 541 cmd->cmd_flags |= BIT_4;
542 cmd->bufflen = se_cmd->data_length; 542 cmd->bufflen = se_cmd->data_length;
543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
544 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
545 544
546 cmd->sg_cnt = se_cmd->t_data_nents; 545 cmd->sg_cnt = se_cmd->t_data_nents;
547 cmd->sg = se_cmd->t_data_sg; 546 cmd->sg = se_cmd->t_data_sg;
548 cmd->offset = 0; 547 cmd->offset = 0;
549 cmd->cmd_flags |= BIT_3;
550 548
551 cmd->prot_sg_cnt = se_cmd->t_prot_nents; 549 cmd->prot_sg_cnt = se_cmd->t_prot_nents;
552 cmd->prot_sg = se_cmd->t_prot_sg; 550 cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
571 cmd->sg_cnt = 0; 569 cmd->sg_cnt = 0;
572 cmd->offset = 0; 570 cmd->offset = 0;
573 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 571 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
574 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
575 if (cmd->cmd_flags & BIT_5) { 572 if (cmd->cmd_flags & BIT_5) {
576 pr_crit("Bit_5 already set for cmd = %p.\n", cmd); 573 pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
577 dump_stack(); 574 dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
636{ 633{
637 struct qla_tgt_cmd *cmd = container_of(se_cmd, 634 struct qla_tgt_cmd *cmd = container_of(se_cmd,
638 struct qla_tgt_cmd, se_cmd); 635 struct qla_tgt_cmd, se_cmd);
639 struct scsi_qla_host *vha = cmd->vha; 636 qlt_abort_cmd(cmd);
640 struct qla_hw_data *ha = vha->hw;
641
642 if (!cmd->sg_mapped)
643 return;
644
645 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
646 cmd->sg_mapped = 0;
647} 637}
648 638
649static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 639static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1149 return NULL; 1139 return NULL;
1150 } 1140 }
1151 1141
1152 key = (((unsigned long)s_id[0] << 16) | 1142 key = sid_to_key(s_id);
1153 ((unsigned long)s_id[1] << 8) |
1154 (unsigned long)s_id[2]);
1155 pr_debug("find_sess_by_s_id: 0x%06x\n", key); 1143 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1156 1144
1157 se_nacl = btree_lookup32(&lport->lport_fcport_map, key); 1145 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1186 void *slot; 1174 void *slot;
1187 int rc; 1175 int rc;
1188 1176
1189 key = (((unsigned long)s_id[0] << 16) | 1177 key = sid_to_key(s_id);
1190 ((unsigned long)s_id[1] << 8) |
1191 (unsigned long)s_id[2]);
1192 pr_debug("set_sess_by_s_id: %06x\n", key); 1178 pr_debug("set_sess_by_s_id: %06x\n", key);
1193 1179
1194 slot = btree_lookup32(&lport->lport_fcport_map, key); 1180 slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1544 } 1530 }
1545 1531
1546 sess->conf_compl_supported = conf_compl_supported; 1532 sess->conf_compl_supported = conf_compl_supported;
1533
1534 /* Reset logout parameters to default */
1535 sess->logout_on_delete = 1;
1536 sess->keep_nport_handle = 0;
1547} 1537}
1548 1538
1549/* 1539/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..6457a8a0db9c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
26#include <linux/blkdev.h> 26#include <linux/blkdev.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/jiffies.h> 28#include <linux/jiffies.h>
29#include <asm/unaligned.h>
30 29
31#include <scsi/scsi.h> 30#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h> 31#include <scsi/scsi_cmnd.h>
@@ -944,7 +943,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
944 scmd->sdb.length); 943 scmd->sdb.length);
945 scmd->sdb.table.sgl = &ses->sense_sgl; 944 scmd->sdb.table.sgl = &ses->sense_sgl;
946 scmd->sc_data_direction = DMA_FROM_DEVICE; 945 scmd->sc_data_direction = DMA_FROM_DEVICE;
947 scmd->sdb.table.nents = 1; 946 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
948 scmd->cmnd[0] = REQUEST_SENSE; 947 scmd->cmnd[0] = REQUEST_SENSE;
949 scmd->cmnd[4] = scmd->sdb.length; 948 scmd->cmnd[4] = scmd->sdb.length;
950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 949 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
2523 } 2522 }
2524} 2523}
2525EXPORT_SYMBOL(scsi_build_sense_buffer); 2524EXPORT_SYMBOL(scsi_build_sense_buffer);
2526
2527/**
2528 * scsi_set_sense_information - set the information field in a
2529 * formatted sense data buffer
2530 * @buf: Where to build sense data
2531 * @info: 64-bit information value to be set
2532 *
2533 **/
2534void scsi_set_sense_information(u8 *buf, u64 info)
2535{
2536 if ((buf[0] & 0x7f) == 0x72) {
2537 u8 *ucp, len;
2538
2539 len = buf[7];
2540 ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
2541 if (!ucp) {
2542 buf[7] = len + 0xa;
2543 ucp = buf + 8 + len;
2544 }
2545 ucp[0] = 0;
2546 ucp[1] = 0xa;
2547 ucp[2] = 0x80; /* Valid bit */
2548 ucp[3] = 0;
2549 put_unaligned_be64(info, &ucp[4]);
2550 } else if ((buf[0] & 0x7f) == 0x70) {
2551 buf[0] |= 0x80;
2552 put_unaligned_be64(info, &buf[3]);
2553 }
2554}
2555EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
583 583
584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) 584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585{ 585{
586 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) 586 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
587 return; 587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589} 589}
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
597 597
598 if (mq) { 598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) { 599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = nents; 600 sdb->table.nents = sdb->table.orig_nents = nents;
601 sg_init_table(sdb->table.sgl, sdb->table.nents); 601 sg_init_table(sdb->table.sgl, nents);
602 return 0; 602 return 0;
603 } 603 }
604 first_chunk = sdb->table.sgl; 604 first_chunk = sdb->table.sgl;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1d2163..e4b799837948 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
217{ 217{
218 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 218 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
219 struct scsi_device *sdev = to_scsi_device(dev); 219 struct scsi_device *sdev = to_scsi_device(dev);
220 int err; 220 int err = 0;
221 221
222 err = blk_pre_runtime_suspend(sdev->request_queue); 222 if (pm && pm->runtime_suspend) {
223 if (err) 223 err = blk_pre_runtime_suspend(sdev->request_queue);
224 return err; 224 if (err)
225 if (pm && pm->runtime_suspend) 225 return err;
226 err = pm->runtime_suspend(dev); 226 err = pm->runtime_suspend(dev);
227 blk_post_runtime_suspend(sdev->request_queue, err); 227 blk_post_runtime_suspend(sdev->request_queue, err);
228 228 }
229 return err; 229 return err;
230} 230}
231 231
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 248 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
249 int err = 0; 249 int err = 0;
250 250
251 blk_pre_runtime_resume(sdev->request_queue); 251 if (pm && pm->runtime_resume) {
252 if (pm && pm->runtime_resume) 252 blk_pre_runtime_resume(sdev->request_queue);
253 err = pm->runtime_resume(dev); 253 err = pm->runtime_resume(dev);
254 blk_post_runtime_resume(sdev->request_queue, err); 254 blk_post_runtime_resume(sdev->request_queue, err);
255 255 }
256 return err; 256 return err;
257} 257}
258 258
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1ac38e73df7e..9ad41168d26d 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
859 859
860 depth = simple_strtoul(buf, NULL, 0); 860 depth = simple_strtoul(buf, NULL, 0);
861 861
862 if (depth < 1 || depth > sht->can_queue) 862 if (depth < 1 || depth > sdev->host->can_queue)
863 return -EINVAL; 863 return -EINVAL;
864 864
865 retval = sht->change_queue_depth(sdev, depth); 865 retval = sht->change_queue_depth(sdev, depth);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index a85292b1d09d..e3cd3ece4412 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
203 return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); 203 return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
204} 204}
205 205
206static int srp_parse_tmo(int *tmo, const char *buf) 206int srp_parse_tmo(int *tmo, const char *buf)
207{ 207{
208 int res = 0; 208 int res = 0;
209 209
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
214 214
215 return res; 215 return res;
216} 216}
217EXPORT_SYMBOL(srp_parse_tmo);
217 218
218static ssize_t show_reconnect_delay(struct device *dev, 219static ssize_t show_reconnect_delay(struct device *dev,
219 struct device_attribute *attr, char *buf) 220 struct device_attribute *attr, char *buf)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..a20da8c25b4f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
2770 max_xfer = sdkp->max_xfer_blocks; 2770 max_xfer = sdkp->max_xfer_blocks;
2771 max_xfer <<= ilog2(sdp->sector_size) - 9; 2771 max_xfer <<= ilog2(sdp->sector_size) - 9;
2772 2772
2773 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2773 sdkp->disk->queue->limits.max_sectors =
2774 max_xfer); 2774 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
2775 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2775
2776 set_capacity(disk, sdkp->capacity); 2776 set_capacity(disk, sdkp->capacity);
2777 sd_config_write_same(sdkp); 2777 sd_config_write_same(sdkp);
2778 kfree(buffer); 2778 kfree(buffer);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 3f25b8fa921d..871f3553987d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1329,9 +1329,9 @@ static int st_open(struct inode *inode, struct file *filp)
1329 spin_lock(&st_use_lock); 1329 spin_lock(&st_use_lock);
1330 STp->in_use = 0; 1330 STp->in_use = 0;
1331 spin_unlock(&st_use_lock); 1331 spin_unlock(&st_use_lock);
1332 scsi_tape_put(STp);
1333 if (resumed) 1332 if (resumed)
1334 scsi_autopm_put_device(STp->device); 1333 scsi_autopm_put_device(STp->device);
1334 scsi_tape_put(STp);
1335 return retval; 1335 return retval;
1336 1336
1337} 1337}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
949{ 949{
950 struct Scsi_Host *shost; 950 struct Scsi_Host *shost;
951 struct virtio_scsi *vscsi; 951 struct virtio_scsi *vscsi;
952 int err, host_prot; 952 int err;
953 u32 sg_elems, num_targets; 953 u32 sg_elems, num_targets;
954 u32 cmd_per_lun; 954 u32 cmd_per_lun;
955 u32 num_queues; 955 u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
1009 1009
1010#ifdef CONFIG_BLK_DEV_INTEGRITY 1010#ifdef CONFIG_BLK_DEV_INTEGRITY
1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
1012 int host_prot;
1013
1012 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1014 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
1013 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 1015 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
1014 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; 1016 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
612 612
613config SPI_ZYNQMP_GQSPI 613config SPI_ZYNQMP_GQSPI
614 tristate "Xilinx ZynqMP GQSPI controller" 614 tristate "Xilinx ZynqMP GQSPI controller"
615 depends on SPI_MASTER 615 depends on SPI_MASTER && HAS_DMA
616 help 616 help
617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
618 618
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
40#define SPFI_CONTROL_SOFT_RESET BIT(11) 40#define SPFI_CONTROL_SOFT_RESET BIT(11)
41#define SPFI_CONTROL_SEND_DMA BIT(10) 41#define SPFI_CONTROL_SEND_DMA BIT(10)
42#define SPFI_CONTROL_GET_DMA BIT(9) 42#define SPFI_CONTROL_GET_DMA BIT(9)
43#define SPFI_CONTROL_SE BIT(8)
43#define SPFI_CONTROL_TMODE_SHIFT 5 44#define SPFI_CONTROL_TMODE_SHIFT 5
44#define SPFI_CONTROL_TMODE_MASK 0x7 45#define SPFI_CONTROL_TMODE_MASK 0x7
45#define SPFI_CONTROL_TMODE_SINGLE 0 46#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
491 else if (xfer->tx_nbits == SPI_NBITS_QUAD && 492 else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
492 xfer->rx_nbits == SPI_NBITS_QUAD) 493 xfer->rx_nbits == SPI_NBITS_QUAD)
493 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; 494 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
495 val |= SPFI_CONTROL_SE;
494 spfi_writel(spfi, val, SPFI_CONTROL); 496 spfi_writel(spfi, val, SPFI_CONTROL);
495} 497}
496 498
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
201{ 201{
202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
203 203
204 if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) 204 if (spi_imx->dma_is_inited
205 && (transfer->len > spi_imx->tx_wml)) 205 && transfer->len > spi_imx->rx_wml * sizeof(u32)
206 && transfer->len > spi_imx->tx_wml * sizeof(u32))
206 return true; 207 return true;
207 return false; 208 return false;
208} 209}
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
214 case GQSPI_SELECT_FLASH_CS_BOTH: 214 case GQSPI_SELECT_FLASH_CS_BOTH:
215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | 215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
216 GQSPI_GENFIFO_CS_UPPER; 216 GQSPI_GENFIFO_CS_UPPER;
217 break;
217 case GQSPI_SELECT_FLASH_CS_UPPER: 218 case GQSPI_SELECT_FLASH_CS_UPPER:
218 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; 219 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
219 break; 220 break;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -693,6 +693,7 @@ static struct class *spidev_class;
693#ifdef CONFIG_OF 693#ifdef CONFIG_OF
694static const struct of_device_id spidev_dt_ids[] = { 694static const struct of_device_id spidev_dt_ids[] = {
695 { .compatible = "rohm,dh2228fv" }, 695 { .compatible = "rohm,dh2228fv" },
696 { .compatible = "lineartechnology,ltc2488" },
696 {}, 697 {},
697}; 698};
698MODULE_DEVICE_TABLE(of, spidev_dt_ids); 699MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
index b8ee81840666..3f287c48e082 100644
--- a/drivers/staging/board/Kconfig
+++ b/drivers/staging/board/Kconfig
@@ -1,6 +1,6 @@
1config STAGING_BOARD 1config STAGING_BOARD
2 bool "Staging Board Support" 2 bool "Staging Board Support"
3 depends on OF_ADDRESS 3 depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
4 help 4 help
5 Select to enable per-board staging support code. 5 Select to enable per-board staging support code.
6 6
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
1266 if (index == das1801hc || index == das1802hc) 1266 if (index == das1801hc || index == das1802hc)
1267 return board; 1267 return board;
1268 index = das1801hc; 1268 index = das1801hc;
1269 break;
1269 default: 1270 default:
1270 dev_err(dev->class_dev, 1271 dev_err(dev->class_dev,
1271 "Board model: probe returned 0x%x (unknown, please report)\n", 1272 "Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 7125eb955ae5..8a9d4a0de129 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -31,7 +31,6 @@
31#define DEBUG_PORTAL_ALLOC 31#define DEBUG_PORTAL_ALLOC
32#define DEBUG_SUBSYSTEM S_LND 32#define DEBUG_SUBSYSTEM S_LND
33 33
34#include <asm/irq.h>
35#include <linux/crc32.h> 34#include <linux/crc32.h>
36#include <linux/errno.h> 35#include <linux/errno.h>
37#include <linux/if.h> 36#include <linux/if.h>
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
40 40
41#define DEBUG_SUBSYSTEM D_OTHER 41#define DEBUG_SUBSYSTEM D_OTHER
42 42
43#include <linux/unaligned/access_ok.h> 43#include <asm/unaligned.h>
44 44
45#include "../include/obd_support.h" 45#include "../include/obd_support.h"
46#include "../include/lustre_debug.h" 46#include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index ed040fbb7df8..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1418,7 +1418,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1418 1418
1419 priv->current_aid = conf->aid; 1419 priv->current_aid = conf->aid;
1420 1420
1421 if (changed & BSS_CHANGED_BSSID) { 1421 if (changed & BSS_CHANGED_BSSID && conf->bssid) {
1422 unsigned long flags; 1422 unsigned long flags;
1423 1423
1424 spin_lock_irqsave(&priv->lock, flags); 1424 spin_lock_irqsave(&priv->lock, flags);
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1483 } 1483 }
1484 } 1484 }
1485 1485
1486 if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { 1486 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1487 if (conf->assoc) { 1487 priv->op_mode != NL80211_IFTYPE_AP) {
1488 if (conf->assoc && conf->beacon_rate) {
1488 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, 1489 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
1489 conf->sync_tsf); 1490 conf->sync_tsf);
1490 1491
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f97323f19acf..af572d718135 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
701 701
702 priv->current_aid = conf->aid; 702 priv->current_aid = conf->aid;
703 703
704 if (changed & BSS_CHANGED_BSSID) 704 if (changed & BSS_CHANGED_BSSID && conf->bssid)
705 vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid); 705 vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
706 706
707 707
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..fd092909a457 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
968 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; 968 cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
969 969
970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; 970 conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
971 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 971 if (hdr->flags & ISCSI_FLAG_CMD_READ)
972 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 972 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
973 } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) 973 else
974 cmd->targ_xfer_tag = 0xFFFFFFFF; 974 cmd->targ_xfer_tag = 0xFFFFFFFF;
975 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 975 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
976 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 976 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
@@ -3998,7 +3998,13 @@ get_immediate:
3998 } 3998 }
3999 3999
4000transport_err: 4000transport_err:
4001 iscsit_take_action_for_connection_exit(conn); 4001 /*
4002 * Avoid the normal connection failure code-path if this connection
4003 * is still within LOGIN mode, and iscsi_np process context is
4004 * responsible for cleaning up the early connection failure.
4005 */
4006 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
4007 iscsit_take_action_for_connection_exit(conn);
4002out: 4008out:
4003 return 0; 4009 return 0;
4004} 4010}
@@ -4082,7 +4088,7 @@ reject:
4082 4088
4083int iscsi_target_rx_thread(void *arg) 4089int iscsi_target_rx_thread(void *arg)
4084{ 4090{
4085 int ret; 4091 int ret, rc;
4086 u8 buffer[ISCSI_HDR_LEN], opcode; 4092 u8 buffer[ISCSI_HDR_LEN], opcode;
4087 u32 checksum = 0, digest = 0; 4093 u32 checksum = 0, digest = 0;
4088 struct iscsi_conn *conn = arg; 4094 struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
4092 * connection recovery / failure event can be triggered externally. 4098 * connection recovery / failure event can be triggered externally.
4093 */ 4099 */
4094 allow_signal(SIGINT); 4100 allow_signal(SIGINT);
4101 /*
4102 * Wait for iscsi_post_login_handler() to complete before allowing
4103 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4104 */
4105 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4106 if (rc < 0)
4107 return 0;
4095 4108
4096 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4109 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4097 struct completion comp; 4110 struct completion comp;
4098 int rc;
4099 4111
4100 init_completion(&comp); 4112 init_completion(&comp);
4101 rc = wait_for_completion_interruptible(&comp); 4113 rc = wait_for_completion_interruptible(&comp);
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
4532 struct iscsi_conn *conn) 4544 struct iscsi_conn *conn)
4533{ 4545{
4534 struct iscsi_session *sess = conn->sess; 4546 struct iscsi_session *sess = conn->sess;
4535 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4547 int sleep = 1;
4548 /*
4549 * Traditional iscsi/tcp will invoke this logic from TX thread
4550 * context during session logout, so clear tx_thread_active and
 4551 * sleep if iscsit_close_connection() has not already occurred.
4552 *
 4553 * Since iser-target invokes this logic from it's own workqueue,
4554 * always sleep waiting for RX/TX thread shutdown to complete
4555 * within iscsit_close_connection().
4556 */
4557 if (conn->conn_transport->transport_type == ISCSI_TCP)
4558 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4536 4559
4537 atomic_set(&conn->conn_logout_remove, 0); 4560 atomic_set(&conn->conn_logout_remove, 0);
4538 complete(&conn->conn_logout_comp); 4561 complete(&conn->conn_logout_comp);
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
4546static void iscsit_logout_post_handler_samecid( 4569static void iscsit_logout_post_handler_samecid(
4547 struct iscsi_conn *conn) 4570 struct iscsi_conn *conn)
4548{ 4571{
4549 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4572 int sleep = 1;
4573
4574 if (conn->conn_transport->transport_type == ISCSI_TCP)
4575 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4550 4576
4551 atomic_set(&conn->conn_logout_remove, 0); 4577 atomic_set(&conn->conn_logout_remove, 0);
4552 complete(&conn->conn_logout_comp); 4578 complete(&conn->conn_logout_comp);
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4765 struct iscsi_session *sess; 4791 struct iscsi_session *sess;
4766 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4792 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4767 struct se_session *se_sess, *se_sess_tmp; 4793 struct se_session *se_sess, *se_sess_tmp;
4794 LIST_HEAD(free_list);
4768 int session_count = 0; 4795 int session_count = 0;
4769 4796
4770 spin_lock_bh(&se_tpg->session_lock); 4797 spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4786 } 4813 }
4787 atomic_set(&sess->session_reinstatement, 1); 4814 atomic_set(&sess->session_reinstatement, 1);
4788 spin_unlock(&sess->conn_lock); 4815 spin_unlock(&sess->conn_lock);
4789 spin_unlock_bh(&se_tpg->session_lock);
4790 4816
4791 iscsit_free_session(sess); 4817 list_move_tail(&se_sess->sess_list, &free_list);
4792 spin_lock_bh(&se_tpg->session_lock); 4818 }
4819 spin_unlock_bh(&se_tpg->session_lock);
4820
4821 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4822 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4793 4823
4824 iscsit_free_session(sess);
4794 session_count++; 4825 session_count++;
4795 } 4826 }
4796 spin_unlock_bh(&se_tpg->session_lock);
4797 4827
4798 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4828 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4799 " Group: %hu\n", session_count, tpg->tpgt); 4829 " Group: %hu\n", session_count, tpg->tpgt);
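The iscsit_release_sessions_for_tpg() hunk above stops dropping and re-taking the portal-group lock inside the loop; instead each session is moved onto a local free_list and everything is torn down after the lock is released, so the potentially sleeping teardown never runs under the spinlock. A minimal sketch of that drain-to-local-list pattern, assuming a simplified registry; struct widget, widget_lock and release_all_widgets() are illustrative stand-ins, not kernel APIs.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct widget {
	struct list_head node;
};

static LIST_HEAD(widget_list);
static DEFINE_SPINLOCK(widget_lock);

/* Move everything to a private list under the lock, then free it outside
 * the lock so a (possibly sleeping) destructor never runs with the
 * spinlock held. */
static int release_all_widgets(void)
{
	struct widget *w, *tmp;
	LIST_HEAD(free_list);
	int count = 0;

	spin_lock_bh(&widget_lock);
	list_for_each_entry_safe(w, tmp, &widget_list, node)
		list_move_tail(&w->node, &free_list);
	spin_unlock_bh(&widget_lock);

	list_for_each_entry_safe(w, tmp, &free_list, node) {
		list_del(&w->node);
		kfree(w);	/* stands in for iscsit_free_session() */
		count++;
	}
	return count;
}

The same shape is useful whenever the per-entry destructor can sleep but the list it lives on is protected by a spinlock.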
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..7e8f65e5448f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
82 init_completion(&conn->conn_logout_comp); 82 init_completion(&conn->conn_logout_comp);
83 init_completion(&conn->rx_half_close_comp); 83 init_completion(&conn->rx_half_close_comp);
84 init_completion(&conn->tx_half_close_comp); 84 init_completion(&conn->tx_half_close_comp);
85 init_completion(&conn->rx_login_comp);
85 spin_lock_init(&conn->cmd_lock); 86 spin_lock_init(&conn->cmd_lock);
86 spin_lock_init(&conn->conn_usage_lock); 87 spin_lock_init(&conn->conn_usage_lock);
87 spin_lock_init(&conn->immed_queue_lock); 88 spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
644 iscsit_start_nopin_timer(conn); 645 iscsit_start_nopin_timer(conn);
645} 646}
646 647
647static int iscsit_start_kthreads(struct iscsi_conn *conn) 648int iscsit_start_kthreads(struct iscsi_conn *conn)
648{ 649{
649 int ret = 0; 650 int ret = 0;
650 651
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
679 680
680 return 0; 681 return 0;
681out_tx: 682out_tx:
683 send_sig(SIGINT, conn->tx_thread, 1);
682 kthread_stop(conn->tx_thread); 684 kthread_stop(conn->tx_thread);
683 conn->tx_thread_active = false; 685 conn->tx_thread_active = false;
684out_bitmap: 686out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
689 return ret; 691 return ret;
690} 692}
691 693
692int iscsi_post_login_handler( 694void iscsi_post_login_handler(
693 struct iscsi_np *np, 695 struct iscsi_np *np,
694 struct iscsi_conn *conn, 696 struct iscsi_conn *conn,
695 u8 zero_tsih) 697 u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
699 struct se_session *se_sess = sess->se_sess; 701 struct se_session *se_sess = sess->se_sess;
700 struct iscsi_portal_group *tpg = sess->tpg; 702 struct iscsi_portal_group *tpg = sess->tpg;
701 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 703 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
702 int rc;
703 704
704 iscsit_inc_conn_usage_count(conn); 705 iscsit_inc_conn_usage_count(conn);
705 706
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
739 sess->sess_ops->InitiatorName); 740 sess->sess_ops->InitiatorName);
740 spin_unlock_bh(&sess->conn_lock); 741 spin_unlock_bh(&sess->conn_lock);
741 742
742 rc = iscsit_start_kthreads(conn);
743 if (rc)
744 return rc;
745
746 iscsi_post_login_start_timers(conn); 743 iscsi_post_login_start_timers(conn);
747 /* 744 /*
748 * Determine CPU mask to ensure connection's RX and TX kthreads 745 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
751 iscsit_thread_get_cpumask(conn); 748 iscsit_thread_get_cpumask(conn);
752 conn->conn_rx_reset_cpumask = 1; 749 conn->conn_rx_reset_cpumask = 1;
753 conn->conn_tx_reset_cpumask = 1; 750 conn->conn_tx_reset_cpumask = 1;
754 751 /*
 752 * Wake up the sleeping iscsi_target_rx_thread() now that
753 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
754 */
755 complete(&conn->rx_login_comp);
755 iscsit_dec_conn_usage_count(conn); 756 iscsit_dec_conn_usage_count(conn);
757
756 if (stop_timer) { 758 if (stop_timer) {
757 spin_lock_bh(&se_tpg->session_lock); 759 spin_lock_bh(&se_tpg->session_lock);
758 iscsit_stop_time2retain_timer(sess); 760 iscsit_stop_time2retain_timer(sess);
759 spin_unlock_bh(&se_tpg->session_lock); 761 spin_unlock_bh(&se_tpg->session_lock);
760 } 762 }
761 iscsit_dec_session_usage_count(sess); 763 iscsit_dec_session_usage_count(sess);
762 return 0; 764 return;
763 } 765 }
764 766
765 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); 767 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
800 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 802 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
801 spin_unlock_bh(&se_tpg->session_lock); 803 spin_unlock_bh(&se_tpg->session_lock);
802 804
803 rc = iscsit_start_kthreads(conn);
804 if (rc)
805 return rc;
806
807 iscsi_post_login_start_timers(conn); 805 iscsi_post_login_start_timers(conn);
808 /* 806 /*
809 * Determine CPU mask to ensure connection's RX and TX kthreads 807 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
812 iscsit_thread_get_cpumask(conn); 810 iscsit_thread_get_cpumask(conn);
813 conn->conn_rx_reset_cpumask = 1; 811 conn->conn_rx_reset_cpumask = 1;
814 conn->conn_tx_reset_cpumask = 1; 812 conn->conn_tx_reset_cpumask = 1;
815 813 /*
 814 * Wake up the sleeping iscsi_target_rx_thread() now that
815 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
816 */
817 complete(&conn->rx_login_comp);
816 iscsit_dec_conn_usage_count(conn); 818 iscsit_dec_conn_usage_count(conn);
817
818 return 0;
819} 819}
820 820
821static void iscsi_handle_login_thread_timeout(unsigned long data) 821static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1380 if (ret < 0) 1380 if (ret < 0)
1381 goto new_sess_out; 1381 goto new_sess_out;
1382 1382
1383 if (!conn->sess) {
1384 pr_err("struct iscsi_conn session pointer is NULL!\n");
1385 goto new_sess_out;
1386 }
1387
1388 iscsi_stop_login_thread_timer(np); 1383 iscsi_stop_login_thread_timer(np);
1389 1384
1390 if (signal_pending(current))
1391 goto new_sess_out;
1392
1393 if (ret == 1) { 1385 if (ret == 1) {
1394 tpg_np = conn->tpg_np; 1386 tpg_np = conn->tpg_np;
1395 1387
1396 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1388 iscsi_post_login_handler(np, conn, zero_tsih);
1397 if (ret < 0)
1398 goto new_sess_out;
1399
1400 iscsit_deaccess_np(np, tpg, tpg_np); 1389 iscsit_deaccess_np(np, tpg, tpg_np);
1401 } 1390 }
1402 1391
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..57aa0d0fd820 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
15extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 15extern int iscsit_start_kthreads(struct iscsi_conn *);
16extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
16extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 17extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
17 bool, bool); 18 bool, bool);
18extern int iscsi_target_login_thread(void *); 19extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..f9cde9141836 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
17 ******************************************************************************/ 17 ******************************************************************************/
18 18
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/kthread.h>
20#include <scsi/iscsi_proto.h> 21#include <scsi/iscsi_proto.h>
21#include <target/target_core_base.h> 22#include <target/target_core_base.h>
22#include <target/target_core_fabric.h> 23#include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
361 ntohl(login_rsp->statsn), login->rsp_length); 362 ntohl(login_rsp->statsn), login->rsp_length);
362 363
363 padding = ((-login->rsp_length) & 3); 364 padding = ((-login->rsp_length) & 3);
365 /*
366 * Before sending the last login response containing the transition
367 * bit for full-feature-phase, go ahead and start up TX/RX threads
368 * now to avoid potential resource allocation failures after the
369 * final login response has been sent.
370 */
371 if (login->login_complete) {
372 int rc = iscsit_start_kthreads(conn);
373 if (rc) {
374 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
375 ISCSI_LOGIN_STATUS_NO_RESOURCES);
376 return -1;
377 }
378 }
364 379
365 if (conn->conn_transport->iscsit_put_login_tx(conn, login, 380 if (conn->conn_transport->iscsit_put_login_tx(conn, login,
366 login->rsp_length + padding) < 0) 381 login->rsp_length + padding) < 0)
367 return -1; 382 goto err;
368 383
369 login->rsp_length = 0; 384 login->rsp_length = 0;
370 mutex_lock(&sess->cmdsn_mutex); 385 mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
373 mutex_unlock(&sess->cmdsn_mutex); 388 mutex_unlock(&sess->cmdsn_mutex);
374 389
375 return 0; 390 return 0;
391
392err:
393 if (login->login_complete) {
394 if (conn->rx_thread && conn->rx_thread_active) {
395 send_sig(SIGINT, conn->rx_thread, 1);
396 kthread_stop(conn->rx_thread);
397 }
398 if (conn->tx_thread && conn->tx_thread_active) {
399 send_sig(SIGINT, conn->tx_thread, 1);
400 kthread_stop(conn->tx_thread);
401 }
402 spin_lock(&iscsit_global->ts_bitmap_lock);
403 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
404 get_order(1));
405 spin_unlock(&iscsit_global->ts_bitmap_lock);
406 }
407 return -1;
376} 408}
377 409
378static void iscsi_target_sk_data_ready(struct sock *sk) 410static void iscsi_target_sk_data_ready(struct sock *sk)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..860e84046177 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
457 if (!strcmp(t->tf_ops->name, fo->name)) { 457 if (!strcmp(t->tf_ops->name, fo->name)) {
458 BUG_ON(atomic_read(&t->tf_access_cnt)); 458 BUG_ON(atomic_read(&t->tf_access_cnt));
459 list_del(&t->tf_list); 459 list_del(&t->tf_list);
460 mutex_unlock(&g_tf_lock);
461 /*
462 * Wait for any outstanding fabric se_deve_entry->rcu_head
463 * callbacks to complete post kfree_rcu(), before allowing
464 * fabric driver unload of TFO->module to proceed.
465 */
466 rcu_barrier();
460 kfree(t); 467 kfree(t);
461 break; 468 return;
462 } 469 }
463 } 470 }
464 mutex_unlock(&g_tf_lock); 471 mutex_unlock(&g_tf_lock);
@@ -747,7 +754,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
747 if (!dev->transport->init_prot || !dev->transport->free_prot) { 754 if (!dev->transport->init_prot || !dev->transport->free_prot) {
748 /* 0 is only allowed value for non-supporting backends */ 755 /* 0 is only allowed value for non-supporting backends */
749 if (flag == 0) 756 if (flag == 0)
750 return 0; 757 return count;
751 758
752 pr_err("DIF protection not supported by backend: %s\n", 759 pr_err("DIF protection not supported by backend: %s\n",
753 dev->transport->name); 760 dev->transport->name);
@@ -1590,9 +1597,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1590 u8 type = 0; 1597 u8 type = 0;
1591 1598
1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1599 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1593 return 0; 1600 return count;
1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1601 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1595 return 0; 1602 return count;
1596 1603
1597 if (dev->export_count) { 1604 if (dev->export_count) {
1598 pr_debug("Unable to process APTPL metadata while" 1605 pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1665,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1658 * PR APTPL Metadata for Reservation 1665 * PR APTPL Metadata for Reservation
1659 */ 1666 */
1660 case Opt_res_holder: 1667 case Opt_res_holder:
1661 match_int(args, &arg); 1668 ret = match_int(args, &arg);
1669 if (ret)
1670 goto out;
1662 res_holder = arg; 1671 res_holder = arg;
1663 break; 1672 break;
1664 case Opt_res_type: 1673 case Opt_res_type:
1665 match_int(args, &arg); 1674 ret = match_int(args, &arg);
1675 if (ret)
1676 goto out;
1666 type = (u8)arg; 1677 type = (u8)arg;
1667 break; 1678 break;
1668 case Opt_res_scope: 1679 case Opt_res_scope:
1669 match_int(args, &arg); 1680 ret = match_int(args, &arg);
1681 if (ret)
1682 goto out;
1670 break; 1683 break;
1671 case Opt_res_all_tg_pt: 1684 case Opt_res_all_tg_pt:
1672 match_int(args, &arg); 1685 ret = match_int(args, &arg);
1686 if (ret)
1687 goto out;
1673 all_tg_pt = (int)arg; 1688 all_tg_pt = (int)arg;
1674 break; 1689 break;
1675 case Opt_mapped_lun: 1690 case Opt_mapped_lun:
1676 match_int(args, &arg); 1691 ret = match_int(args, &arg);
1692 if (ret)
1693 goto out;
1677 mapped_lun = (u64)arg; 1694 mapped_lun = (u64)arg;
1678 break; 1695 break;
1679 /* 1696 /*
@@ -1701,14 +1718,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1701 } 1718 }
1702 break; 1719 break;
1703 case Opt_tpgt: 1720 case Opt_tpgt:
1704 match_int(args, &arg); 1721 ret = match_int(args, &arg);
1722 if (ret)
1723 goto out;
1705 tpgt = (u16)arg; 1724 tpgt = (u16)arg;
1706 break; 1725 break;
1707 case Opt_port_rtpi: 1726 case Opt_port_rtpi:
1708 match_int(args, &arg); 1727 ret = match_int(args, &arg);
1728 if (ret)
1729 goto out;
1709 break; 1730 break;
1710 case Opt_target_lun: 1731 case Opt_target_lun:
1711 match_int(args, &arg); 1732 ret = match_int(args, &arg);
1733 if (ret)
1734 goto out;
1712 target_lun = (u64)arg; 1735 target_lun = (u64)arg;
1713 break; 1736 break;
1714 default: 1737 default:
@@ -1985,7 +2008,7 @@ static ssize_t target_core_store_alua_lu_gp(
1985 2008
1986 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2009 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1987 if (!lu_gp_mem) 2010 if (!lu_gp_mem)
1988 return 0; 2011 return count;
1989 2012
1990 if (count > LU_GROUP_NAME_BUF) { 2013 if (count > LU_GROUP_NAME_BUF) {
1991 pr_err("ALUA LU Group Alias too large!\n"); 2014 pr_err("ALUA LU Group Alias too large!\n");
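Several hunks above replace bare match_int(args, &arg) calls with ones that check the return value, so a malformed token no longer leaves arg stale or uninitialised. A small sketch of that option-parsing pattern using <linux/parser.h>; the Opt_limit token, the "limit=%d" syntax and parse_limit() are made up for illustration.

#include <linux/parser.h>
#include <linux/string.h>
#include <linux/errno.h>

enum { Opt_limit, Opt_err };

static const match_table_t tokens = {
	{ Opt_limit, "limit=%d" },
	{ Opt_err, NULL },
};

static int parse_limit(char *options, int *limit)
{
	substring_t args[MAX_OPT_ARGS];
	char *p;
	int token, arg, ret = 0;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_limit:
			/* Bail out instead of silently using a stale value
			 * when the argument is not a valid integer. */
			ret = match_int(args, &arg);
			if (ret)
				return ret;
			*limit = arg;
			break;
		default:
			return -EINVAL;
		}
	}
	return ret;
}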
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8e70a8..be9cefc07407 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
84 list_for_each_entry(tb, &backend_list, list) { 84 list_for_each_entry(tb, &backend_list, list) {
85 if (tb->ops == ops) { 85 if (tb->ops == ops) {
86 list_del(&tb->list); 86 list_del(&tb->list);
87 mutex_unlock(&backend_mutex);
88 /*
89 * Wait for any outstanding backend driver ->rcu_head
90 * callbacks to complete post TBO->free_device() ->
91 * call_rcu(), before allowing backend driver module
92 * unload of target_backend_ops->owner to proceed.
93 */
94 rcu_barrier();
87 kfree(tb); 95 kfree(tb);
88 break; 96 return;
89 } 97 }
90 } 98 }
91 mutex_unlock(&backend_mutex); 99 mutex_unlock(&backend_mutex);
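Both unregister paths above (target_unregister_template() and target_backend_unregister()) now drop the lock and call rcu_barrier() before kfree(), so any call_rcu()/kfree_rcu() callbacks that may still reference the entry, or the owning module, finish first. A condensed sketch of the same ordering; struct backend, backend_list, backend_mutex and backend_unregister() are illustrative stand-ins rather than the kernel's actual code.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct backend {
	struct list_head list;
	const void *ops;
};

static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static void backend_unregister(const void *ops)
{
	struct backend *b;

	mutex_lock(&backend_mutex);
	list_for_each_entry(b, &backend_list, list) {
		if (b->ops == ops) {
			list_del(&b->list);
			mutex_unlock(&backend_mutex);
			/* Let outstanding RCU callbacks that may still
			 * dereference this entry (or its owner module)
			 * run to completion before the memory goes away. */
			rcu_barrier();
			kfree(b);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}

Dropping the mutex before the barrier keeps other registrations from stalling behind the grace-period wait.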
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
1474 LIST_HEAD(tid_dest_list); 1474 LIST_HEAD(tid_dest_list);
1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1476 unsigned char *buf, *ptr, proto_ident; 1476 unsigned char *buf, *ptr, proto_ident;
1477 const unsigned char *i_str; 1477 const unsigned char *i_str = NULL;
1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; 1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1479 sense_reason_t ret; 1479 sense_reason_t ret;
1480 u32 tpdl, tid_len = 0; 1480 u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; 333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
334 dev->dev_attrib.hw_max_sectors = UINT_MAX; 334 dev->dev_attrib.hw_max_sectors = UINT_MAX;
335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
336 dev->dev_attrib.is_nonrot = 1;
336 337
337 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 338 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
338 339
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..f87d4cef6d39 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) 454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
455 buf[4] = 0x5; 455 buf[4] = 0x5;
456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || 456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) 457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
458 buf[4] = 0x4; 458 buf[4] = 0x4;
459 } 459 }
460 460
461 /* logical unit supports type 1 and type 3 protection */
462 if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
463 (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
464 (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
465 buf[4] |= (0x3 << 3);
466 }
467
461 /* Set HEADSUP, ORDSUP, SIMPSUP */ 468 /* Set HEADSUP, ORDSUP, SIMPSUP */
462 buf[5] = 0x07; 469 buf[5] = 0x07;
463 470
@@ -1196,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1196 struct se_dev_entry *deve; 1203 struct se_dev_entry *deve;
1197 struct se_session *sess = cmd->se_sess; 1204 struct se_session *sess = cmd->se_sess;
1198 struct se_node_acl *nacl; 1205 struct se_node_acl *nacl;
1206 struct scsi_lun slun;
1199 unsigned char *buf; 1207 unsigned char *buf;
1200 u32 lun_count = 0, offset = 8; 1208 u32 lun_count = 0, offset = 8;
1201 1209 __be32 len;
1202 if (cmd->data_length < 16) {
1203 pr_warn("REPORT LUNS allocation length %u too small\n",
1204 cmd->data_length);
1205 return TCM_INVALID_CDB_FIELD;
1206 }
1207 1210
1208 buf = transport_kmap_data_sg(cmd); 1211 buf = transport_kmap_data_sg(cmd);
1209 if (!buf) 1212 if (cmd->data_length && !buf)
1210 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1213 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1211 1214
1212 /* 1215 /*
@@ -1214,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1214 * coming via a target_core_mod PASSTHROUGH op, and not through 1217 * coming via a target_core_mod PASSTHROUGH op, and not through
1215 * a $FABRIC_MOD. In that case, report LUN=0 only. 1218 * a $FABRIC_MOD. In that case, report LUN=0 only.
1216 */ 1219 */
1217 if (!sess) { 1220 if (!sess)
1218 int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
1219 lun_count = 1;
1220 goto done; 1221 goto done;
1221 } 1222
1222 nacl = sess->se_node_acl; 1223 nacl = sess->se_node_acl;
1223 1224
1224 rcu_read_lock(); 1225 rcu_read_lock();
@@ -1229,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1229 * See SPC2-R20 7.19. 1230 * See SPC2-R20 7.19.
1230 */ 1231 */
1231 lun_count++; 1232 lun_count++;
1232 if ((offset + 8) > cmd->data_length) 1233 if (offset >= cmd->data_length)
1233 continue; 1234 continue;
1234 1235
1235 int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); 1236 int_to_scsilun(deve->mapped_lun, &slun);
1237 memcpy(buf + offset, &slun,
1238 min(8u, cmd->data_length - offset));
1236 offset += 8; 1239 offset += 8;
1237 } 1240 }
1238 rcu_read_unlock(); 1241 rcu_read_unlock();
@@ -1241,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
1241 * See SPC3 r07, page 159. 1244 * See SPC3 r07, page 159.
1242 */ 1245 */
1243done: 1246done:
1244 lun_count *= 8; 1247 /*
1245 buf[0] = ((lun_count >> 24) & 0xff); 1248 * If no LUNs are accessible, report virtual LUN 0.
1246 buf[1] = ((lun_count >> 16) & 0xff); 1249 */
1247 buf[2] = ((lun_count >> 8) & 0xff); 1250 if (lun_count == 0) {
1248 buf[3] = (lun_count & 0xff); 1251 int_to_scsilun(0, &slun);
1249 transport_kunmap_data_sg(cmd); 1252 if (cmd->data_length > 8)
1253 memcpy(buf + offset, &slun,
1254 min(8u, cmd->data_length - offset));
1255 lun_count = 1;
1256 }
1257
1258 if (buf) {
1259 len = cpu_to_be32(lun_count * 8);
1260 memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
1261 transport_kunmap_data_sg(cmd);
1262 }
1250 1263
1251 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); 1264 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
1252 return 0; 1265 return 0;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61b9648..620dcd405ff6 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@ struct power_table {
68 * registered cooling device. 68 * registered cooling device.
69 * @cpufreq_state: integer value representing the current state of cpufreq 69 * @cpufreq_state: integer value representing the current state of cpufreq
70 * cooling devices. 70 * cooling devices.
71 * @cpufreq_val: integer value representing the absolute value of the clipped 71 * @clipped_freq: integer value representing the absolute value of the clipped
72 * frequency. 72 * frequency.
73 * @max_level: maximum cooling level. One less than total number of valid 73 * @max_level: maximum cooling level. One less than total number of valid
74 * cpufreq frequencies. 74 * cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
91 int id; 91 int id;
92 struct thermal_cooling_device *cool_dev; 92 struct thermal_cooling_device *cool_dev;
93 unsigned int cpufreq_state; 93 unsigned int cpufreq_state;
94 unsigned int cpufreq_val; 94 unsigned int clipped_freq;
95 unsigned int max_level; 95 unsigned int max_level;
96 unsigned int *freq_table; /* In descending order */ 96 unsigned int *freq_table; /* In descending order */
97 struct cpumask allowed_cpus; 97 struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
107static DEFINE_IDR(cpufreq_idr); 107static DEFINE_IDR(cpufreq_idr);
108static DEFINE_MUTEX(cooling_cpufreq_lock); 108static DEFINE_MUTEX(cooling_cpufreq_lock);
109 109
110static unsigned int cpufreq_dev_count;
111
112static DEFINE_MUTEX(cooling_list_lock);
110static LIST_HEAD(cpufreq_dev_list); 113static LIST_HEAD(cpufreq_dev_list);
111 114
112/** 115/**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
185{ 188{
186 struct cpufreq_cooling_device *cpufreq_dev; 189 struct cpufreq_cooling_device *cpufreq_dev;
187 190
188 mutex_lock(&cooling_cpufreq_lock); 191 mutex_lock(&cooling_list_lock);
189 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 192 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
190 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { 193 if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
191 mutex_unlock(&cooling_cpufreq_lock); 194 mutex_unlock(&cooling_list_lock);
192 return get_level(cpufreq_dev, freq); 195 return get_level(cpufreq_dev, freq);
193 } 196 }
194 } 197 }
195 mutex_unlock(&cooling_cpufreq_lock); 198 mutex_unlock(&cooling_list_lock);
196 199
197 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); 200 pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
198 return THERMAL_CSTATE_INVALID; 201 return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
215 unsigned long event, void *data) 218 unsigned long event, void *data)
216{ 219{
217 struct cpufreq_policy *policy = data; 220 struct cpufreq_policy *policy = data;
218 unsigned long max_freq = 0; 221 unsigned long clipped_freq;
219 struct cpufreq_cooling_device *cpufreq_dev; 222 struct cpufreq_cooling_device *cpufreq_dev;
220 223
221 switch (event) { 224 if (event != CPUFREQ_ADJUST)
225 return NOTIFY_DONE;
222 226
223 case CPUFREQ_ADJUST: 227 mutex_lock(&cooling_list_lock);
224 mutex_lock(&cooling_cpufreq_lock); 228 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
225 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 229 if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
226 if (!cpumask_test_cpu(policy->cpu, 230 continue;
227 &cpufreq_dev->allowed_cpus))
228 continue;
229 231
230 max_freq = cpufreq_dev->cpufreq_val; 232 /*
233 * policy->max is the maximum allowed frequency defined by user
234 * and clipped_freq is the maximum that thermal constraints
235 * allow.
236 *
237 * If clipped_freq is lower than policy->max, then we need to
238 * readjust policy->max.
239 *
240 * But, if clipped_freq is greater than policy->max, we don't
241 * need to do anything.
242 */
243 clipped_freq = cpufreq_dev->clipped_freq;
231 244
232 if (policy->max != max_freq) 245 if (policy->max > clipped_freq)
233 cpufreq_verify_within_limits(policy, 0, 246 cpufreq_verify_within_limits(policy, 0, clipped_freq);
234 max_freq);
235 }
236 mutex_unlock(&cooling_cpufreq_lock);
237 break; 247 break;
238 default:
239 return NOTIFY_DONE;
240 } 248 }
249 mutex_unlock(&cooling_list_lock);
241 250
242 return NOTIFY_OK; 251 return NOTIFY_OK;
243} 252}
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
519 528
520 clip_freq = cpufreq_device->freq_table[state]; 529 clip_freq = cpufreq_device->freq_table[state];
521 cpufreq_device->cpufreq_state = state; 530 cpufreq_device->cpufreq_state = state;
522 cpufreq_device->cpufreq_val = clip_freq; 531 cpufreq_device->clipped_freq = clip_freq;
523 532
524 cpufreq_update_policy(cpu); 533 cpufreq_update_policy(cpu);
525 534
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
861 pr_debug("%s: freq:%u KHz\n", __func__, freq); 870 pr_debug("%s: freq:%u KHz\n", __func__, freq);
862 } 871 }
863 872
864 cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0]; 873 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
865 cpufreq_dev->cool_dev = cool_dev; 874 cpufreq_dev->cool_dev = cool_dev;
866 875
867 mutex_lock(&cooling_cpufreq_lock); 876 mutex_lock(&cooling_cpufreq_lock);
868 877
878 mutex_lock(&cooling_list_lock);
879 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
880 mutex_unlock(&cooling_list_lock);
881
869 /* Register the notifier for first cpufreq cooling device */ 882 /* Register the notifier for first cpufreq cooling device */
870 if (list_empty(&cpufreq_dev_list)) 883 if (!cpufreq_dev_count++)
871 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 884 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
872 CPUFREQ_POLICY_NOTIFIER); 885 CPUFREQ_POLICY_NOTIFIER);
873 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
874
875 mutex_unlock(&cooling_cpufreq_lock); 886 mutex_unlock(&cooling_cpufreq_lock);
876 887
877 return cool_dev; 888 return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
1013 return; 1024 return;
1014 1025
1015 cpufreq_dev = cdev->devdata; 1026 cpufreq_dev = cdev->devdata;
1016 mutex_lock(&cooling_cpufreq_lock);
1017 list_del(&cpufreq_dev->node);
1018 1027
1019 /* Unregister the notifier for the last cpufreq cooling device */ 1028 /* Unregister the notifier for the last cpufreq cooling device */
1020 if (list_empty(&cpufreq_dev_list)) 1029 mutex_lock(&cooling_cpufreq_lock);
1030 if (!--cpufreq_dev_count)
1021 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, 1031 cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
1022 CPUFREQ_POLICY_NOTIFIER); 1032 CPUFREQ_POLICY_NOTIFIER);
1033
1034 mutex_lock(&cooling_list_lock);
1035 list_del(&cpufreq_dev->node);
1036 mutex_unlock(&cooling_list_lock);
1037
1023 mutex_unlock(&cooling_cpufreq_lock); 1038 mutex_unlock(&cooling_cpufreq_lock);
1024 1039
1025 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1040 thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
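The cpu_cooling changes above split the single cooling_cpufreq_lock into a dedicated list lock plus a registration count, so the cpufreq notifier and cpufreq_cooling_get_level() can walk the device list without serialising against notifier (un)registration. A rough sketch of that first-in/last-out registration bookkeeping; cooling_dev, backend_notifier_register() and the other names here are invented for the example.

#include <linux/list.h>
#include <linux/mutex.h>

struct cooling_dev {
	struct list_head node;
};

static DEFINE_MUTEX(reg_lock);	/* guards notifier registration + count */
static DEFINE_MUTEX(list_lock);	/* guards dev_list only */
static unsigned int dev_count;
static LIST_HEAD(dev_list);

/* Hypothetical helpers standing in for cpufreq_register_notifier() and
 * cpufreq_unregister_notifier(). */
static void backend_notifier_register(void) { }
static void backend_notifier_unregister(void) { }

static void cooling_dev_add(struct cooling_dev *dev)
{
	mutex_lock(&reg_lock);

	mutex_lock(&list_lock);
	list_add(&dev->node, &dev_list);
	mutex_unlock(&list_lock);

	/* Only the first device registers the shared notifier. */
	if (!dev_count++)
		backend_notifier_register();

	mutex_unlock(&reg_lock);
}

static void cooling_dev_del(struct cooling_dev *dev)
{
	mutex_lock(&reg_lock);

	/* Only the last device unregisters it. */
	if (!--dev_count)
		backend_notifier_unregister();

	mutex_lock(&list_lock);
	list_del(&dev->node);
	mutex_unlock(&list_lock);

	mutex_unlock(&reg_lock);
}

As in the patch, the list is updated under its own lock while the counter decides when the shared notifier is actually registered or torn down.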
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
405static struct platform_driver hisi_thermal_driver = { 405static struct platform_driver hisi_thermal_driver = {
406 .driver = { 406 .driver = {
407 .name = "hisi_thermal", 407 .name = "hisi_thermal",
408 .owner = THIS_MODULE,
409 .pm = &hisi_thermal_pm_ops, 408 .pm = &hisi_thermal_pm_ops,
410 .of_match_table = of_hisi_thermal_match, 409 .of_match_table = of_hisi_thermal_match,
411 }, 410 },
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..7006860f2f36 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
229 struct thermal_instance *instance; 229 struct thermal_instance *instance;
230 struct power_allocator_params *params = tz->governor_data; 230 struct power_allocator_params *params = tz->governor_data;
231 u32 *req_power, *max_power, *granted_power, *extra_actor_power; 231 u32 *req_power, *max_power, *granted_power, *extra_actor_power;
232 u32 total_req_power, max_allocatable_power; 232 u32 *weighted_req_power;
233 u32 total_req_power, max_allocatable_power, total_weighted_req_power;
233 u32 total_granted_power, power_range; 234 u32 total_granted_power, power_range;
234 int i, num_actors, total_weight, ret = 0; 235 int i, num_actors, total_weight, ret = 0;
235 int trip_max_desired_temperature = params->trip_max_desired_temperature; 236 int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 248 }
248 249
249 /* 250 /*
250 * We need to allocate three arrays of the same size: 251 * We need to allocate five arrays of the same size:
251 * req_power, max_power and granted_power. They are going to 252 * req_power, max_power, granted_power, extra_actor_power and
252 * be needed until this function returns. Allocate them all 253 * weighted_req_power. They are going to be needed until this
253 * in one go to simplify the allocation and deallocation 254 * function returns. Allocate them all in one go to simplify
254 * logic. 255 * the allocation and deallocation logic.
255 */ 256 */
256 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); 257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); 258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); 259 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
259 req_power = devm_kcalloc(&tz->device, num_actors * 4, 260 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
261 req_power = devm_kcalloc(&tz->device, num_actors * 5,
260 sizeof(*req_power), GFP_KERNEL); 262 sizeof(*req_power), GFP_KERNEL);
261 if (!req_power) { 263 if (!req_power) {
262 ret = -ENOMEM; 264 ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
266 max_power = &req_power[num_actors]; 268 max_power = &req_power[num_actors];
267 granted_power = &req_power[2 * num_actors]; 269 granted_power = &req_power[2 * num_actors];
268 extra_actor_power = &req_power[3 * num_actors]; 270 extra_actor_power = &req_power[3 * num_actors];
271 weighted_req_power = &req_power[4 * num_actors];
269 272
270 i = 0; 273 i = 0;
274 total_weighted_req_power = 0;
271 total_req_power = 0; 275 total_req_power = 0;
272 max_allocatable_power = 0; 276 max_allocatable_power = 0;
273 277
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
289 else 293 else
290 weight = instance->weight; 294 weight = instance->weight;
291 295
292 req_power[i] = frac_to_int(weight * req_power[i]); 296 weighted_req_power[i] = frac_to_int(weight * req_power[i]);
293 297
294 if (power_actor_get_max_power(cdev, tz, &max_power[i])) 298 if (power_actor_get_max_power(cdev, tz, &max_power[i]))
295 continue; 299 continue;
296 300
297 total_req_power += req_power[i]; 301 total_req_power += req_power[i];
298 max_allocatable_power += max_power[i]; 302 max_allocatable_power += max_power[i];
303 total_weighted_req_power += weighted_req_power[i];
299 304
300 i++; 305 i++;
301 } 306 }
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
303 power_range = pid_controller(tz, current_temp, control_temp, 308 power_range = pid_controller(tz, current_temp, control_temp,
304 max_allocatable_power); 309 max_allocatable_power);
305 310
306 divvy_up_power(req_power, max_power, num_actors, total_req_power, 311 divvy_up_power(weighted_req_power, max_power, num_actors,
307 power_range, granted_power, extra_actor_power); 312 total_weighted_req_power, power_range, granted_power,
313 extra_actor_power);
308 314
309 total_granted_power = 0; 315 total_granted_power = 0;
310 i = 0; 316 i = 0;
@@ -328,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
328 max_allocatable_power, current_temp, 334 max_allocatable_power, current_temp,
329 (s32)control_temp - (s32)current_temp); 335 (s32)control_temp - (s32)current_temp);
330 336
331 devm_kfree(&tz->device, req_power); 337 kfree(req_power);
332unlock: 338unlock:
333 mutex_unlock(&tz->lock); 339 mutex_unlock(&tz->lock);
334 340
@@ -420,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
420 return -EINVAL; 426 return -EINVAL;
421 } 427 }
422 428
423 params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL); 429 params = kzalloc(sizeof(*params), GFP_KERNEL);
424 if (!params) 430 if (!params)
425 return -ENOMEM; 431 return -ENOMEM;
426 432
@@ -462,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
462 return 0; 468 return 0;
463 469
464free: 470free:
465 devm_kfree(&tz->device, params); 471 kfree(params);
466 return ret; 472 return ret;
467} 473}
468 474
469static void power_allocator_unbind(struct thermal_zone_device *tz) 475static void power_allocator_unbind(struct thermal_zone_device *tz)
470{ 476{
471 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); 477 dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
472 devm_kfree(&tz->device, tz->governor_data); 478 kfree(tz->governor_data);
473 tz->governor_data = NULL; 479 tz->governor_data = NULL;
474} 480}
475 481
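allocate_power() above grows its scratch buffer from four to five equally sized arrays, all carved out of one kcalloc() so a single allocation (and a single kfree()) covers them. A tiny sketch of that idiom under the same assumption of equally sized element types; alloc_scratch() and the array names are illustrative only.

#include <linux/types.h>
#include <linux/slab.h>

static int alloc_scratch(int n)
{
	u32 *req, *max, *granted, *extra, *weighted;

	/* One allocation, five equally sized u32 arrays. */
	req = kcalloc(n * 5, sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	max      = &req[n];
	granted  = &req[2 * n];
	extra    = &req[3 * n];
	weighted = &req[4 * n];

	/* ... fill and consume the five views here ... */
	(void)max; (void)granted; (void)extra; (void)weighted;

	kfree(req);	/* a single kfree() releases all five */
	return 0;
}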
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
1config EXYNOS_THERMAL 1config EXYNOS_THERMAL
2 tristate "Exynos thermal management unit driver" 2 tristate "Exynos thermal management unit driver"
3 depends on OF 3 depends on THERMAL_OF
4 help 4 help
5 If you say yes here you get support for the TMU (Thermal Management 5 If you say yes here you get support for the TMU (Thermal Management
6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises 6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
1296 1296
1297static int exynos_tmu_probe(struct platform_device *pdev) 1297static int exynos_tmu_probe(struct platform_device *pdev)
1298{ 1298{
1299 struct exynos_tmu_platform_data *pdata;
1300 struct exynos_tmu_data *data; 1299 struct exynos_tmu_data *data;
1301 int ret; 1300 int ret;
1302 1301
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1318 if (ret) 1317 if (ret)
1319 goto err_sensor; 1318 goto err_sensor;
1320 1319
1321 pdata = data->pdata;
1322
1323 INIT_WORK(&data->irq_work, exynos_tmu_work); 1320 INIT_WORK(&data->irq_work, exynos_tmu_work);
1324 1321
1325 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); 1322 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
1392 if (!IS_ERR(data->clk_sec)) 1389 if (!IS_ERR(data->clk_sec))
1393 clk_unprepare(data->clk_sec); 1390 clk_unprepare(data->clk_sec);
1394err_sensor: 1391err_sensor:
1392 if (!IS_ERR_OR_NULL(data->regulator))
1393 regulator_disable(data->regulator);
1395 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); 1394 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1396 1395
1397 return ret; 1396 return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
1333 return -ENODEV; 1333 return -ENODEV;
1334 1334
1335unbind: 1335unbind:
1336 device_remove_file(&tz->device, &pos->weight_attr);
1336 device_remove_file(&tz->device, &pos->attr); 1337 device_remove_file(&tz->device, &pos->attr);
1337 sysfs_remove_link(&tz->device.kobj, pos->name); 1338 sysfs_remove_link(&tz->device.kobj, pos->name);
1338 release_idr(&tz->idr, &tz->lock, pos->id); 1339 release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..ee8bfacf2071 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1108 * Locking: ctrl_lock 1108 * Locking: ctrl_lock
1109 */ 1109 */
1110 1110
1111static void isig(int sig, struct tty_struct *tty) 1111static void __isig(int sig, struct tty_struct *tty)
1112{ 1112{
1113 struct n_tty_data *ldata = tty->disc_data;
1114 struct pid *tty_pgrp = tty_get_pgrp(tty); 1113 struct pid *tty_pgrp = tty_get_pgrp(tty);
1115 if (tty_pgrp) { 1114 if (tty_pgrp) {
1116 kill_pgrp(tty_pgrp, sig, 1); 1115 kill_pgrp(tty_pgrp, sig, 1);
1117 put_pid(tty_pgrp); 1116 put_pid(tty_pgrp);
1118 } 1117 }
1118}
1119 1119
1120 if (!L_NOFLSH(tty)) { 1120static void isig(int sig, struct tty_struct *tty)
1121{
1122 struct n_tty_data *ldata = tty->disc_data;
1123
1124 if (L_NOFLSH(tty)) {
1125 /* signal only */
1126 __isig(sig, tty);
1127
1128 } else { /* signal and flush */
1121 up_read(&tty->termios_rwsem); 1129 up_read(&tty->termios_rwsem);
1122 down_write(&tty->termios_rwsem); 1130 down_write(&tty->termios_rwsem);
1123 1131
1132 __isig(sig, tty);
1133
1124 /* clear echo buffer */ 1134 /* clear echo buffer */
1125 mutex_lock(&ldata->output_lock); 1135 mutex_lock(&ldata->output_lock);
1126 ldata->echo_head = ldata->echo_tail = 0; 1136 ldata->echo_head = ldata->echo_tail = 0;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..15b4079a335e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
1185config SERIAL_SC16IS7XX 1185config SERIAL_SC16IS7XX
1186 tristate "SC16IS7xx serial support" 1186 tristate "SC16IS7xx serial support"
1187 select SERIAL_CORE 1187 select SERIAL_CORE
1188 depends on I2C || SPI_MASTER 1188 depends on (SPI_MASTER && !I2C) || I2C
1189 help 1189 help
1190 This selects support for SC16IS7xx serial ports. 1190 This selects support for SC16IS7xx serial ports.
1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, 1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2310 void __iomem *base; 2310 void __iomem *base;
2311 2311
2312 base = devm_ioremap_resource(dev, mmiobase); 2312 base = devm_ioremap_resource(dev, mmiobase);
2313 if (!base) 2313 if (IS_ERR(base))
2314 return -ENOMEM; 2314 return PTR_ERR(base);
2315 2315
2316 index = pl011_probe_dt_alias(index, dev); 2316 index = pl011_probe_dt_alias(index, dev);
2317 2317
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..679709f51fd4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
950 950
951 port = platform_get_drvdata(pdev); 951 port = platform_get_drvdata(pdev);
952 uart_remove_one_port(&etraxfs_uart_driver, port); 952 uart_remove_one_port(&etraxfs_uart_driver, port);
953 etraxfs_uart_ports[pdev->id] = NULL; 953 etraxfs_uart_ports[port->line] = NULL;
954 954
955 return 0; 955 return 0;
956} 956}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..54fdc7866ea1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
1121 1121
1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1123 1123
1124 /* Can we enable the DMA support? */
1125 if (is_imx6q_uart(sport) && !uart_console(port) &&
1126 !sport->dma_is_inited)
1127 imx_uart_dma_init(sport);
1128
1129 spin_lock_irqsave(&sport->port.lock, flags); 1124 spin_lock_irqsave(&sport->port.lock, flags);
1130 /* Reset fifo's and state machines */ 1125 /* Reset fifo's and state machines */
1131 i = 100; 1126 i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
1143 writel(USR1_RTSD, sport->port.membase + USR1); 1138 writel(USR1_RTSD, sport->port.membase + USR1);
1144 writel(USR2_ORE, sport->port.membase + USR2); 1139 writel(USR2_ORE, sport->port.membase + USR2);
1145 1140
1146 if (sport->dma_is_inited && !sport->dma_is_enabled)
1147 imx_enable_dma(sport);
1148
1149 temp = readl(sport->port.membase + UCR1); 1141 temp = readl(sport->port.membase + UCR1);
1150 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; 1142 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1151 1143
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1316 } else { 1308 } else {
1317 ucr2 |= UCR2_CTSC; 1309 ucr2 |= UCR2_CTSC;
1318 } 1310 }
1311
1312 /* Can we enable the DMA support? */
1313 if (is_imx6q_uart(sport) && !uart_console(port)
1314 && !sport->dma_is_inited)
1315 imx_uart_dma_init(sport);
1319 } else { 1316 } else {
1320 termios->c_cflag &= ~CRTSCTS; 1317 termios->c_cflag &= ~CRTSCTS;
1321 } 1318 }
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1432 if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) 1429 if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1433 imx_enable_ms(&sport->port); 1430 imx_enable_ms(&sport->port);
1434 1431
1432 if (sport->dma_is_inited && !sport->dma_is_enabled)
1433 imx_enable_dma(sport);
1435 spin_unlock_irqrestore(&sport->port.lock, flags); 1434 spin_unlock_irqrestore(&sport->port.lock, flags);
1436} 1435}
1437 1436
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..5ccc698cbbfa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val); 354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
355} 355}
356 356
357static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
358{
359 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
360 u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
361
362 regcache_cache_bypass(s->regmap, true);
363 regmap_raw_read(s->regmap, addr, s->buf, rxlen);
364 regcache_cache_bypass(s->regmap, false);
365}
366
367static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
368{
369 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
370 u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
371
372 regcache_cache_bypass(s->regmap, true);
373 regmap_raw_write(s->regmap, addr, s->buf, to_send);
374 regcache_cache_bypass(s->regmap, false);
375}
376
357static void sc16is7xx_port_update(struct uart_port *port, u8 reg, 377static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
358 u8 mask, u8 val) 378 u8 mask, u8 val)
359{ 379{
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
508 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 528 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
509 bytes_read = 1; 529 bytes_read = 1;
510 } else { 530 } else {
511 regcache_cache_bypass(s->regmap, true); 531 sc16is7xx_fifo_read(port, rxlen);
512 regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
513 s->buf, rxlen);
514 regcache_cache_bypass(s->regmap, false);
515 bytes_read = rxlen; 532 bytes_read = rxlen;
516 } 533 }
517 534
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
591 s->buf[i] = xmit->buf[xmit->tail]; 608 s->buf[i] = xmit->buf[xmit->tail];
592 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 609 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
593 } 610 }
594 regcache_cache_bypass(s->regmap, true); 611
595 regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); 612 sc16is7xx_fifo_write(port, to_send);
596 regcache_cache_bypass(s->regmap, false);
597 } 613 }
598 614
599 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 615 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..f36852067f20 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1418 mutex_lock(&port->mutex); 1418 mutex_lock(&port->mutex);
1419 uart_shutdown(tty, state); 1419 uart_shutdown(tty, state);
1420 tty_port_tty_set(port, NULL); 1420 tty_port_tty_set(port, NULL);
1421 tty->closing = 0; 1421
1422 spin_lock_irqsave(&port->lock, flags); 1422 spin_lock_irqsave(&port->lock, flags);
1423 1423
1424 if (port->blocked_open) { 1424 if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1444 mutex_unlock(&port->mutex); 1444 mutex_unlock(&port->mutex);
1445 1445
1446 tty_ldisc_flush(tty); 1446 tty_ldisc_flush(tty);
1447 tty->closing = 0;
1447} 1448}
1448 1449
1449static void uart_wait_until_sent(struct tty_struct *tty, int timeout) 1450static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
356 schedule(); 356 schedule();
357 continue; 357 continue;
358 } 358 }
359 __set_current_state(TASK_RUNNING);
359 count = sel_buffer_lth - pasted; 360 count = sel_buffer_lth - pasted;
360 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, 361 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
361 count); 362 count);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
742 __module_get(vc->vc_sw->owner); 742 __module_get(vc->vc_sw->owner);
743 vc->vc_num = num; 743 vc->vc_num = num;
744 vc->vc_display_fg = &master_display_fg; 744 vc->vc_display_fg = &master_display_fg;
745 if (vc->vc_uni_pagedir_loc)
746 con_free_unimap(vc);
745 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; 747 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
746 vc->vc_uni_pagedir = NULL; 748 vc->vc_uni_pagedir = NULL;
747 vc->vc_hi_font_mask = 0; 749 vc->vc_hi_font_mask = 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
1024 }, 1024 },
1025}; 1025};
1026 1026
1027module_platform_driver(ci_hdrc_driver); 1027static int __init ci_hdrc_platform_register(void)
1028{
1029 ci_hdrc_host_driver_init();
1030 return platform_driver_register(&ci_hdrc_driver);
1031}
1032module_init(ci_hdrc_platform_register);
1033
1034static void __exit ci_hdrc_platform_unregister(void)
1035{
1036 platform_driver_unregister(&ci_hdrc_driver);
1037}
1038module_exit(ci_hdrc_platform_unregister);
1028 1039
1029MODULE_ALIAS("platform:ci_hdrc"); 1040MODULE_ALIAS("platform:ci_hdrc");
1030MODULE_LICENSE("GPL v2"); 1041MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
249 rdrv->name = "host"; 249 rdrv->name = "host";
250 ci->roles[CI_ROLE_HOST] = rdrv; 250 ci->roles[CI_ROLE_HOST] = rdrv;
251 251
252 return 0;
253}
254
255void ci_hdrc_host_driver_init(void)
256{
252 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides); 257 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
253 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend; 258 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
254 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend; 259 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
255
256 return 0;
257} 260}
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
5 5
6int ci_hdrc_host_init(struct ci_hdrc *ci); 6int ci_hdrc_host_init(struct ci_hdrc *ci);
7void ci_hdrc_host_destroy(struct ci_hdrc *ci); 7void ci_hdrc_host_destroy(struct ci_hdrc *ci);
8void ci_hdrc_host_driver_init(void);
8 9
9#else 10#else
10 11
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
18 19
19} 20}
20 21
22static void ci_hdrc_host_driver_init(void)
23{
24
25}
26
21#endif 27#endif
22 28
23#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */ 29#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
1944 usb_deregister(&acm_driver); 1944 usb_deregister(&acm_driver);
1945 tty_unregister_driver(acm_tty_driver); 1945 tty_unregister_driver(acm_tty_driver);
1946 put_tty_driver(acm_tty_driver); 1946 put_tty_driver(acm_tty_driver);
1947 idr_destroy(&acm_minors);
1947} 1948}
1948 1949
1949module_init(acm_init); 1950module_init(acm_init);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
242{ 242{
243 return bus_register(&ulpi_bus); 243 return bus_register(&ulpi_bus);
244} 244}
245module_init(ulpi_init); 245subsys_initcall(ulpi_init);
246 246
247static void __exit ulpi_exit(void) 247static void __exit ulpi_exit(void)
248{ 248{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..cbcd0920fb51 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
1022 dev_name(&usb_dev->dev), retval); 1022 dev_name(&usb_dev->dev), retval);
1023 return (retval < 0) ? retval : -EMSGSIZE; 1023 return (retval < 0) ? retval : -EMSGSIZE;
1024 } 1024 }
1025 if (usb_dev->speed == USB_SPEED_SUPER) { 1025
1026 if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
1026 retval = usb_get_bos_descriptor(usb_dev); 1027 retval = usb_get_bos_descriptor(usb_dev);
1027 if (retval < 0) { 1028 if (!retval) {
1029 usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
1030 } else if (usb_dev->speed == USB_SPEED_SUPER) {
1028 mutex_unlock(&usb_bus_list_lock); 1031 mutex_unlock(&usb_bus_list_lock);
1029 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", 1032 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
1030 dev_name(&usb_dev->dev), retval); 1033 dev_name(&usb_dev->dev), retval);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..73dfa194160b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
122 return usb_get_intfdata(hdev->actconfig->interface[0]); 122 return usb_get_intfdata(hdev->actconfig->interface[0]);
123} 123}
124 124
125static int usb_device_supports_lpm(struct usb_device *udev) 125int usb_device_supports_lpm(struct usb_device *udev)
126{ 126{
127 /* USB 2.1 (and greater) devices indicate LPM support through 127 /* USB 2.1 (and greater) devices indicate LPM support through
128 * their USB 2.0 Extended Capabilities BOS descriptor. 128 * their USB 2.0 Extended Capabilities BOS descriptor.
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
65extern void usb_hub_cleanup(void); 65extern void usb_hub_cleanup(void);
66extern int usb_major_init(void); 66extern int usb_major_init(void);
67extern void usb_major_cleanup(void); 67extern void usb_major_cleanup(void);
68extern int usb_device_supports_lpm(struct usb_device *udev);
68 69
69#ifdef CONFIG_PM 70#ifdef CONFIG_PM
70 71
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e5b546f1152e..c3cc1a78d1e2 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
72 dev_dbg(hsotg->dev, "%s\n", __func__); 72 dev_dbg(hsotg->dev, "%s\n", __func__);
73 73
74 /* Backup Host regs */ 74 /* Backup Host regs */
75 hr = hsotg->hr_backup; 75 hr = &hsotg->hr_backup;
76 if (!hr) {
77 hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
78 if (!hr) {
79 dev_err(hsotg->dev, "%s: can't allocate host regs\n",
80 __func__);
81 return -ENOMEM;
82 }
83
84 hsotg->hr_backup = hr;
85 }
86 hr->hcfg = readl(hsotg->regs + HCFG); 76 hr->hcfg = readl(hsotg->regs + HCFG);
87 hr->haintmsk = readl(hsotg->regs + HAINTMSK); 77 hr->haintmsk = readl(hsotg->regs + HAINTMSK);
88 for (i = 0; i < hsotg->core_params->host_channels; ++i) 78 for (i = 0; i < hsotg->core_params->host_channels; ++i)
@@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
90 80
91 hr->hprt0 = readl(hsotg->regs + HPRT0); 81 hr->hprt0 = readl(hsotg->regs + HPRT0);
92 hr->hfir = readl(hsotg->regs + HFIR); 82 hr->hfir = readl(hsotg->regs + HFIR);
83 hr->valid = true;
93 84
94 return 0; 85 return 0;
95} 86}
@@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
109 dev_dbg(hsotg->dev, "%s\n", __func__); 100 dev_dbg(hsotg->dev, "%s\n", __func__);
110 101
111 /* Restore host regs */ 102 /* Restore host regs */
112 hr = hsotg->hr_backup; 103 hr = &hsotg->hr_backup;
113 if (!hr) { 104 if (!hr->valid) {
114 dev_err(hsotg->dev, "%s: no host registers to restore\n", 105 dev_err(hsotg->dev, "%s: no host registers to restore\n",
115 __func__); 106 __func__);
116 return -EINVAL; 107 return -EINVAL;
117 } 108 }
109 hr->valid = false;
118 110
119 writel(hr->hcfg, hsotg->regs + HCFG); 111 writel(hr->hcfg, hsotg->regs + HCFG);
120 writel(hr->haintmsk, hsotg->regs + HAINTMSK); 112 writel(hr->haintmsk, hsotg->regs + HAINTMSK);
@@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
152 dev_dbg(hsotg->dev, "%s\n", __func__); 144 dev_dbg(hsotg->dev, "%s\n", __func__);
153 145
154 /* Backup dev regs */ 146 /* Backup dev regs */
155 dr = hsotg->dr_backup; 147 dr = &hsotg->dr_backup;
156 if (!dr) {
157 dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
158 if (!dr) {
159 dev_err(hsotg->dev, "%s: can't allocate device regs\n",
160 __func__);
161 return -ENOMEM;
162 }
163
164 hsotg->dr_backup = dr;
165 }
166 148
167 dr->dcfg = readl(hsotg->regs + DCFG); 149 dr->dcfg = readl(hsotg->regs + DCFG);
168 dr->dctl = readl(hsotg->regs + DCTL); 150 dr->dctl = readl(hsotg->regs + DCTL);
@@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
195 dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i)); 177 dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
196 dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i)); 178 dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
197 } 179 }
198 180 dr->valid = true;
199 return 0; 181 return 0;
200} 182}
201 183
@@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
215 dev_dbg(hsotg->dev, "%s\n", __func__); 197 dev_dbg(hsotg->dev, "%s\n", __func__);
216 198
217 /* Restore dev regs */ 199 /* Restore dev regs */
218 dr = hsotg->dr_backup; 200 dr = &hsotg->dr_backup;
219 if (!dr) { 201 if (!dr->valid) {
220 dev_err(hsotg->dev, "%s: no device registers to restore\n", 202 dev_err(hsotg->dev, "%s: no device registers to restore\n",
221 __func__); 203 __func__);
222 return -EINVAL; 204 return -EINVAL;
223 } 205 }
206 dr->valid = false;
224 207
225 writel(dr->dcfg, hsotg->regs + DCFG); 208 writel(dr->dcfg, hsotg->regs + DCFG);
226 writel(dr->dctl, hsotg->regs + DCTL); 209 writel(dr->dctl, hsotg->regs + DCTL);
@@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
268 int i; 251 int i;
269 252
270 /* Backup global regs */ 253 /* Backup global regs */
271 gr = hsotg->gr_backup; 254 gr = &hsotg->gr_backup;
272 if (!gr) {
273 gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
274 if (!gr) {
275 dev_err(hsotg->dev, "%s: can't allocate global regs\n",
276 __func__);
277 return -ENOMEM;
278 }
279
280 hsotg->gr_backup = gr;
281 }
282 255
283 gr->gotgctl = readl(hsotg->regs + GOTGCTL); 256 gr->gotgctl = readl(hsotg->regs + GOTGCTL);
284 gr->gintmsk = readl(hsotg->regs + GINTMSK); 257 gr->gintmsk = readl(hsotg->regs + GINTMSK);
@@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
291 for (i = 0; i < MAX_EPS_CHANNELS; i++) 264 for (i = 0; i < MAX_EPS_CHANNELS; i++)
292 gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i)); 265 gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));
293 266
267 gr->valid = true;
294 return 0; 268 return 0;
295} 269}
296 270
@@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
309 dev_dbg(hsotg->dev, "%s\n", __func__); 283 dev_dbg(hsotg->dev, "%s\n", __func__);
310 284
311 /* Restore global regs */ 285 /* Restore global regs */
312 gr = hsotg->gr_backup; 286 gr = &hsotg->gr_backup;
313 if (!gr) { 287 if (!gr->valid) {
314 dev_err(hsotg->dev, "%s: no global registers to restore\n", 288 dev_err(hsotg->dev, "%s: no global registers to restore\n",
315 __func__); 289 __func__);
316 return -EINVAL; 290 return -EINVAL;
317 } 291 }
292 gr->valid = false;
318 293
319 writel(0xffffffff, hsotg->regs + GINTSTS); 294 writel(0xffffffff, hsotg->regs + GINTSTS);
320 writel(gr->gotgctl, hsotg->regs + GOTGCTL); 295 writel(gr->gotgctl, hsotg->regs + GOTGCTL);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 53b8de03f102..0ed87620941b 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -492,6 +492,7 @@ struct dwc2_gregs_backup {
492 u32 gdfifocfg; 492 u32 gdfifocfg;
493 u32 dtxfsiz[MAX_EPS_CHANNELS]; 493 u32 dtxfsiz[MAX_EPS_CHANNELS];
494 u32 gpwrdn; 494 u32 gpwrdn;
495 bool valid;
495}; 496};
496 497
497/** 498/**
@@ -521,6 +522,7 @@ struct dwc2_dregs_backup {
521 u32 doepctl[MAX_EPS_CHANNELS]; 522 u32 doepctl[MAX_EPS_CHANNELS];
522 u32 doeptsiz[MAX_EPS_CHANNELS]; 523 u32 doeptsiz[MAX_EPS_CHANNELS];
523 u32 doepdma[MAX_EPS_CHANNELS]; 524 u32 doepdma[MAX_EPS_CHANNELS];
525 bool valid;
524}; 526};
525 527
526/** 528/**
@@ -538,6 +540,7 @@ struct dwc2_hregs_backup {
538 u32 hcintmsk[MAX_EPS_CHANNELS]; 540 u32 hcintmsk[MAX_EPS_CHANNELS];
539 u32 hprt0; 541 u32 hprt0;
540 u32 hfir; 542 u32 hfir;
543 bool valid;
541}; 544};
542 545
543/** 546/**
@@ -705,9 +708,9 @@ struct dwc2_hsotg {
705 struct work_struct wf_otg; 708 struct work_struct wf_otg;
706 struct timer_list wkp_timer; 709 struct timer_list wkp_timer;
707 enum dwc2_lx_state lx_state; 710 enum dwc2_lx_state lx_state;
708 struct dwc2_gregs_backup *gr_backup; 711 struct dwc2_gregs_backup gr_backup;
709 struct dwc2_dregs_backup *dr_backup; 712 struct dwc2_dregs_backup dr_backup;
710 struct dwc2_hregs_backup *hr_backup; 713 struct dwc2_hregs_backup hr_backup;
711 714
712 struct dentry *debug_root; 715 struct dentry *debug_root;
713 struct debugfs_regset32 *regset; 716 struct debugfs_regset32 *regset;
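
Editorial note on the dwc2 hunks above: the patch replaces the lazily devm_kzalloc()'d gr_backup/dr_backup/hr_backup pointers with structs embedded directly in struct dwc2_hsotg, each carrying a valid flag. The sketch below shows the pattern in isolation; the struct and field names are invented for illustration and are not the dwc2 definitions.

```c
/*
 * Illustrative sketch only -- the struct names and fields here are
 * invented, not the dwc2 definitions.  The point is the pattern the
 * patch adopts: the backup area lives inside the owning object, so
 * saving registers can never fail with -ENOMEM, and restore checks
 * an explicit flag instead of a NULL pointer.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct regs_backup {
	uint32_t cfg;
	uint32_t intmsk;
	bool valid;                    /* set on backup, cleared on restore */
};

struct controller {
	volatile uint32_t *regs;       /* memory-mapped register window */
	struct regs_backup backup;     /* embedded: no allocation needed */
};

static int backup_regs(struct controller *c)
{
	c->backup.cfg    = c->regs[0];
	c->backup.intmsk = c->regs[1];
	c->backup.valid  = true;
	return 0;                      /* cannot fail any more */
}

static int restore_regs(struct controller *c)
{
	if (!c->backup.valid)
		return -EINVAL;        /* nothing was ever saved */
	c->backup.valid = false;
	c->regs[0] = c->backup.cfg;
	c->regs[1] = c->backup.intmsk;
	return 0;
}
```

The cost is a little unconditional space in struct dwc2_hsotg; the gain is that the backup step in the suspend path can no longer fail part-way through for lack of memory.
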
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b10377c65064..f845c41fe9e5 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
359 359
360/* Caller must hold driver lock */ 360/* Caller must hold driver lock */
361static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg, 361static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
362 struct dwc2_hcd_urb *urb, void **ep_handle, 362 struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
363 gfp_t mem_flags) 363 struct dwc2_qtd *qtd)
364{ 364{
365 struct dwc2_qtd *qtd;
366 u32 intr_mask; 365 u32 intr_mask;
367 int retval; 366 int retval;
368 int dev_speed; 367 int dev_speed;
@@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
386 return -ENODEV; 385 return -ENODEV;
387 } 386 }
388 387
389 qtd = kzalloc(sizeof(*qtd), mem_flags);
390 if (!qtd) 388 if (!qtd)
391 return -ENOMEM; 389 return -EINVAL;
392 390
393 dwc2_hcd_qtd_init(qtd, urb); 391 dwc2_hcd_qtd_init(qtd, urb);
394 retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle, 392 retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
395 mem_flags);
396 if (retval) { 393 if (retval) {
397 dev_err(hsotg->dev, 394 dev_err(hsotg->dev,
398 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n", 395 "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
399 retval); 396 retval);
400 kfree(qtd);
401 return retval; 397 return retval;
402 } 398 }
403 399
@@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2445 u32 tflags = 0; 2441 u32 tflags = 0;
2446 void *buf; 2442 void *buf;
2447 unsigned long flags; 2443 unsigned long flags;
2444 struct dwc2_qh *qh;
2445 bool qh_allocated = false;
2446 struct dwc2_qtd *qtd;
2448 2447
2449 if (dbg_urb(urb)) { 2448 if (dbg_urb(urb)) {
2450 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n"); 2449 dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
@@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2523 urb->iso_frame_desc[i].length); 2522 urb->iso_frame_desc[i].length);
2524 2523
2525 urb->hcpriv = dwc2_urb; 2524 urb->hcpriv = dwc2_urb;
2525 qh = (struct dwc2_qh *) ep->hcpriv;
2526 /* Create QH for the endpoint if it doesn't exist */
2527 if (!qh) {
2528 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
2529 if (!qh) {
2530 retval = -ENOMEM;
2531 goto fail0;
2532 }
2533 ep->hcpriv = qh;
2534 qh_allocated = true;
2535 }
2536
2537 qtd = kzalloc(sizeof(*qtd), mem_flags);
2538 if (!qtd) {
2539 retval = -ENOMEM;
2540 goto fail1;
2541 }
2526 2542
2527 spin_lock_irqsave(&hsotg->lock, flags); 2543 spin_lock_irqsave(&hsotg->lock, flags);
2528 retval = usb_hcd_link_urb_to_ep(hcd, urb); 2544 retval = usb_hcd_link_urb_to_ep(hcd, urb);
2529 if (retval) 2545 if (retval)
2530 goto fail1; 2546 goto fail2;
2531 2547
2532 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags); 2548 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
2533 if (retval) 2549 if (retval)
2534 goto fail2; 2550 goto fail3;
2535 2551
2536 if (alloc_bandwidth) { 2552 if (alloc_bandwidth) {
2537 dwc2_allocate_bus_bandwidth(hcd, 2553 dwc2_allocate_bus_bandwidth(hcd,
@@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
2543 2559
2544 return 0; 2560 return 0;
2545 2561
2546fail2: 2562fail3:
2547 dwc2_urb->priv = NULL; 2563 dwc2_urb->priv = NULL;
2548 usb_hcd_unlink_urb_from_ep(hcd, urb); 2564 usb_hcd_unlink_urb_from_ep(hcd, urb);
2549fail1: 2565fail2:
2550 spin_unlock_irqrestore(&hsotg->lock, flags); 2566 spin_unlock_irqrestore(&hsotg->lock, flags);
2551 urb->hcpriv = NULL; 2567 urb->hcpriv = NULL;
2568 kfree(qtd);
2569fail1:
2570 if (qh_allocated) {
2571 struct dwc2_qtd *qtd2, *qtd2_tmp;
2572
2573 ep->hcpriv = NULL;
2574 dwc2_hcd_qh_unlink(hsotg, qh);
2575 /* Free each QTD in the QH's QTD list */
2576 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
2577 qtd_list_entry)
2578 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
2579 dwc2_hcd_qh_free(hsotg, qh);
2580 }
2552fail0: 2581fail0:
2553 kfree(dwc2_urb); 2582 kfree(dwc2_urb);
2554 2583
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7b5841c40033..fc1054965552 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
463/* Schedule Queue Functions */ 463/* Schedule Queue Functions */
464/* Implemented in hcd_queue.c */ 464/* Implemented in hcd_queue.c */
465extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg); 465extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
466extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
467 struct dwc2_hcd_urb *urb,
468 gfp_t mem_flags);
466extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 469extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
467extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 470extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
468extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 471extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
@@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
471 474
472extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb); 475extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
473extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 476extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
474 struct dwc2_qh **qh, gfp_t mem_flags); 477 struct dwc2_qh *qh);
475 478
476/* Unlinks and frees a QTD */ 479/* Unlinks and frees a QTD */
477static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg, 480static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 9b5c36256627..3ad63d392e13 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
191 * 191 *
192 * Return: Pointer to the newly allocated QH, or NULL on error 192 * Return: Pointer to the newly allocated QH, or NULL on error
193 */ 193 */
194static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, 194struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
195 struct dwc2_hcd_urb *urb, 195 struct dwc2_hcd_urb *urb,
196 gfp_t mem_flags) 196 gfp_t mem_flags)
197{ 197{
@@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
767 * 767 *
768 * @hsotg: The DWC HCD structure 768 * @hsotg: The DWC HCD structure
769 * @qtd: The QTD to add 769 * @qtd: The QTD to add
770 * @qh: Out parameter to return queue head 770 * @qh: Queue head to add qtd to
771 * @atomic_alloc: Flag to do atomic alloc if needed
772 * 771 *
773 * Return: 0 if successful, negative error code otherwise 772 * Return: 0 if successful, negative error code otherwise
774 * 773 *
775 * Finds the correct QH to place the QTD into. If it does not find a QH, it 774 * If the QH to which the QTD is added is not currently scheduled, it is placed
776 * will create a new QH. If the QH to which the QTD is added is not currently 775 * into the proper schedule based on its EP type.
777 * scheduled, it is placed into the proper schedule based on its EP type.
778 */ 776 */
779int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 777int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
780 struct dwc2_qh **qh, gfp_t mem_flags) 778 struct dwc2_qh *qh)
781{ 779{
782 struct dwc2_hcd_urb *urb = qtd->urb;
783 int allocated = 0;
784 int retval; 780 int retval;
785 781
786 /* 782 if (unlikely(!qh)) {
787 * Get the QH which holds the QTD-list to insert to. Create QH if it 783 dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
788 * doesn't exist. 784 retval = -EINVAL;
789 */ 785 goto fail;
790 if (*qh == NULL) {
791 *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
792 if (*qh == NULL)
793 return -ENOMEM;
794 allocated = 1;
795 } 786 }
796 787
797 retval = dwc2_hcd_qh_add(hsotg, *qh); 788 retval = dwc2_hcd_qh_add(hsotg, qh);
798 if (retval) 789 if (retval)
799 goto fail; 790 goto fail;
800 791
801 qtd->qh = *qh; 792 qtd->qh = qh;
802 list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list); 793 list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
803 794
804 return 0; 795 return 0;
805
806fail: 796fail:
807 if (allocated) {
808 struct dwc2_qtd *qtd2, *qtd2_tmp;
809 struct dwc2_qh *qh_tmp = *qh;
810
811 *qh = NULL;
812 dwc2_hcd_qh_unlink(hsotg, qh_tmp);
813
814 /* Free each QTD in the QH's QTD list */
815 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
816 qtd_list_entry)
817 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
818
819 dwc2_hcd_qh_free(hsotg, qh_tmp);
820 }
821
822 return retval; 797 return retval;
823} 798}
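
Editorial note on the dwc2 hcd.c/hcd_queue.c hunks above: the QH and QTD allocations move out of the spinlocked dwc2_hcd_urb_enqueue() path and into _dwc2_hcd_urb_enqueue(), which still honours the caller's mem_flags. The kernel-style sketch below shows that "allocate outside the lock, hand the objects in, unwind on failure" shape; struct qh and struct qtd here are placeholders, not the dwc2 types.

```c
/*
 * Kernel-style sketch with invented names (struct qh / struct qtd are
 * placeholders, not the dwc2 types).  Allocations, which may sleep for
 * GFP_KERNEL, happen before the spinlock is taken; the locked section
 * only links objects that already exist.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct qh  { int scheduled; };          /* placeholder */
struct qtd { struct qh *qh; };          /* placeholder */

static int enqueue_locked(struct qh *qh, struct qtd *qtd)
{
	if (!qh || !qtd)
		return -EINVAL;         /* caller must have allocated both */
	qtd->qh = qh;
	/* ... link qtd into the qh's list and schedule the qh ... */
	return 0;
}

static int enqueue(spinlock_t *lock, struct qh **cached_qh, gfp_t gfp)
{
	unsigned long flags;
	struct qtd *qtd;
	bool qh_allocated = false;
	int ret;

	if (!*cached_qh) {
		*cached_qh = kzalloc(sizeof(**cached_qh), gfp);
		if (!*cached_qh)
			return -ENOMEM;
		qh_allocated = true;
	}

	qtd = kzalloc(sizeof(*qtd), gfp);
	if (!qtd) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_irqsave(lock, flags);
	ret = enqueue_locked(*cached_qh, qtd);
	spin_unlock_irqrestore(lock, flags);
	if (ret) {
		kfree(qtd);
		goto err;
	}
	return 0;

err:
	if (qh_allocated) {             /* undo only what this call created */
		kfree(*cached_qh);
		*cached_qh = NULL;
	}
	return ret;
}
```

Because the allocations now happen before the lock is taken, GFP_KERNEL requests can sleep safely, and the fail1/fail2/fail3 unwinding added to _dwc2_hcd_urb_enqueue() mirrors the cleanup that dwc2_hcd_qtd_add() used to do internally.
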
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5c110d8e293b..ff5773c66b84 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
446 /* Select the HS PHY interface */ 446 /* Select the HS PHY interface */
447 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) { 447 switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
448 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI: 448 case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
449 if (!strncmp(dwc->hsphy_interface, "utmi", 4)) { 449 if (dwc->hsphy_interface &&
450 !strncmp(dwc->hsphy_interface, "utmi", 4)) {
450 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI; 451 reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
451 break; 452 break;
452 } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) { 453 } else if (dwc->hsphy_interface &&
454 !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
453 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI; 455 reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
454 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); 456 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
455 } else { 457 } else {
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..69e769c35cf5 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
729 break; 729 break;
730 case USB_REQ_SET_INTERFACE:
731 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
732 dwc->start_config_issued = false;
733 /* Fall through */
730 default: 734 default:
731 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 735 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
732 ret = dwc3_ep0_delegate_req(dwc, ctrl); 736 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 4e3447bbd097..58b4657fc721 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1758,10 +1758,13 @@ unknown:
1758 * take such requests too, if that's ever needed: to work 1758 * take such requests too, if that's ever needed: to work
1759 * in config 0, etc. 1759 * in config 0, etc.
1760 */ 1760 */
1761 list_for_each_entry(f, &cdev->config->functions, list) 1761 if (cdev->config) {
1762 if (f->req_match && f->req_match(f, ctrl)) 1762 list_for_each_entry(f, &cdev->config->functions, list)
1763 goto try_fun_setup; 1763 if (f->req_match && f->req_match(f, ctrl))
1764 f = NULL; 1764 goto try_fun_setup;
1765 f = NULL;
1766 }
1767
1765 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1768 switch (ctrl->bRequestType & USB_RECIP_MASK) {
1766 case USB_RECIP_INTERFACE: 1769 case USB_RECIP_INTERFACE:
1767 if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) 1770 if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0495c94a23d7..289e20119fea 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -571,7 +571,7 @@ static struct config_group *function_make(
571 if (IS_ERR(fi)) 571 if (IS_ERR(fi))
572 return ERR_CAST(fi); 572 return ERR_CAST(fi);
573 573
574 ret = config_item_set_name(&fi->group.cg_item, name); 574 ret = config_item_set_name(&fi->group.cg_item, "%s", name);
575 if (ret) { 575 if (ret) {
576 usb_put_function_instance(fi); 576 usb_put_function_instance(fi);
577 return ERR_PTR(ret); 577 return ERR_PTR(ret);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 45b8c8b338df..6e7be91e6097 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
924 924
925 kiocb->private = p; 925 kiocb->private = p;
926 926
927 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 927 if (p->aio)
928 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
928 929
929 res = ffs_epfile_io(kiocb->ki_filp, p); 930 res = ffs_epfile_io(kiocb->ki_filp, p);
930 if (res == -EIOCBQUEUED) 931 if (res == -EIOCBQUEUED)
@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
968 969
969 kiocb->private = p; 970 kiocb->private = p;
970 971
971 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); 972 if (p->aio)
973 kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
972 974
973 res = ffs_epfile_io(kiocb->ki_filp, p); 975 res = ffs_epfile_io(kiocb->ki_filp, p);
974 if (res == -EIOCBQUEUED) 976 if (res == -EIOCBQUEUED)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
699 int ret; 699 int ret;
700 700
701 ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); 701 ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
702 if (ret >= HIDG_MINORS) {
703 ida_simple_remove(&hidg_ida, ret);
704 ret = -ENODEV;
705 }
702 706
703 return ret; 707 return ret;
704} 708}
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d2259c663996..f936268d26c6 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
2786 return -EINVAL; 2786 return -EINVAL;
2787 } 2787 }
2788 2788
2789 curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); 2789 curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
2790 if (unlikely(!curlun)) 2790 if (unlikely(!curlun))
2791 return -ENOMEM; 2791 return -ENOMEM;
2792 2792
@@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
2796 common->luns = curlun; 2796 common->luns = curlun;
2797 common->nluns = nluns; 2797 common->nluns = nluns;
2798 2798
2799 pr_info("Number of LUNs=%d\n", common->nluns);
2800
2801 return 0; 2799 return 0;
2802} 2800}
2803EXPORT_SYMBOL_GPL(fsg_common_set_nluns); 2801EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
@@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
3563 struct fsg_opts *opts = fsg_opts_from_func_inst(fi); 3561 struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
3564 struct fsg_common *common = opts->common; 3562 struct fsg_common *common = opts->common;
3565 struct fsg_dev *fsg; 3563 struct fsg_dev *fsg;
3564 unsigned nluns, i;
3566 3565
3567 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); 3566 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
3568 if (unlikely(!fsg)) 3567 if (unlikely(!fsg))
3569 return ERR_PTR(-ENOMEM); 3568 return ERR_PTR(-ENOMEM);
3570 3569
3571 mutex_lock(&opts->lock); 3570 mutex_lock(&opts->lock);
3571 if (!opts->refcnt) {
3572 for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
3573 if (common->luns[i])
3574 nluns = i + 1;
3575 if (!nluns)
3576 pr_warn("No LUNS defined, continuing anyway\n");
3577 else
3578 common->nluns = nluns;
3579 pr_info("Number of LUNs=%u\n", common->nluns);
3580 }
3572 opts->refcnt++; 3581 opts->refcnt++;
3573 mutex_unlock(&opts->lock); 3582 mutex_unlock(&opts->lock);
3583
3574 fsg->function.name = FSG_DRIVER_DESC; 3584 fsg->function.name = FSG_DRIVER_DESC;
3575 fsg->function.bind = fsg_bind; 3585 fsg->function.bind = fsg_bind;
3576 fsg->function.unbind = fsg_unbind; 3586 fsg->function.unbind = fsg_unbind;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 6316aa5b1c49..ad50a67c1465 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1145 if (opts->id && !midi->id) { 1145 if (opts->id && !midi->id) {
1146 status = -ENOMEM; 1146 status = -ENOMEM;
1147 mutex_unlock(&opts->lock); 1147 mutex_unlock(&opts->lock);
1148 goto kstrdup_fail; 1148 goto setup_fail;
1149 } 1149 }
1150 midi->in_ports = opts->in_ports; 1150 midi->in_ports = opts->in_ports;
1151 midi->out_ports = opts->out_ports; 1151 midi->out_ports = opts->out_ports;
@@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
1164 1164
1165 return &midi->func; 1165 return &midi->func;
1166 1166
1167kstrdup_fail:
1168 f_midi_unregister_card(midi);
1169setup_fail: 1167setup_fail:
1170 for (--i; i >= 0; i--) 1168 for (--i; i >= 0; i--)
1171 kfree(midi->in_port[i]); 1169 kfree(midi->in_port[i]);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
1248 1248
1249static inline int gprinter_get_minor(void) 1249static inline int gprinter_get_minor(void)
1250{ 1250{
1251 return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); 1251 int ret;
1252
1253 ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
1254 if (ret >= PRINTER_MINORS) {
1255 ida_simple_remove(&printer_ida, ret);
1256 ret = -ENODEV;
1257 }
1258
1259 return ret;
1252} 1260}
1253 1261
1254static inline void gprinter_put_minor(int minor) 1262static inline void gprinter_put_minor(int minor)
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
1162 factor = 1000; 1162 factor = 1000;
1163 } else { 1163 } else {
1164 ep_desc = &hs_epin_desc; 1164 ep_desc = &hs_epin_desc;
1165 factor = 125; 1165 factor = 8000;
1166 } 1166 }
1167 1167
1168 /* pre-compute some values for iso_complete() */ 1168 /* pre-compute some values for iso_complete() */
1169 uac2->p_framesize = opts->p_ssize * 1169 uac2->p_framesize = opts->p_ssize *
1170 num_channels(opts->p_chmask); 1170 num_channels(opts->p_chmask);
1171 rate = opts->p_srate * uac2->p_framesize; 1171 rate = opts->p_srate * uac2->p_framesize;
1172 uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor; 1172 uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
1173 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval, 1173 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
1174 prm->max_psize); 1174 prm->max_psize);
1175 1175
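
Editorial note on the f_uac2.c hunk above: the high-speed factor changes from 125 (microseconds per microframe) to 8000 (microframes per second), and the multiplication becomes a division, so p_interval now counts service intervals per second rather than the interval length. A small worked example, using assumed parameters (48 kHz, 2 channels, 2 bytes per sample, bInterval = 1), shows what the old formula got wrong; this is illustration only, not driver code.

```c
/*
 * Worked example only, not driver code.  Assumed values: 48 kHz sample
 * rate, 2 channels, 2 bytes per sample, high-speed endpoint with
 * bInterval = 1 (one transfer every 125 us microframe).
 */
#include <stdio.h>

int main(void)
{
	unsigned p_srate = 48000, channels = 2, ssize = 2, bInterval = 1;
	unsigned framesize = ssize * channels;          /* 4 bytes per audio frame */
	unsigned rate = p_srate * framesize;            /* 192000 bytes per second */

	unsigned old_interval = (1u << (bInterval - 1)) * 125;   /* 125: interval length in us */
	unsigned new_interval = 8000 / (1u << (bInterval - 1));  /* 8000: intervals per second */

	printf("old pktsize = %u\n", rate / old_interval);   /* 1536 bytes -- far too large  */
	printf("new pktsize = %u\n", rate / new_interval);   /* 24 bytes per microframe      */
	return 0;
}
```

For bInterval = 4 both formulas happen to evaluate to 1000, which is presumably why the miscalculation stayed hidden with common descriptor values.
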
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
779 /* The current hw dequeue pointer */ 779 /* The current hw dequeue pointer */
780 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0)); 780 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
781 deq_ptr_64 = tmp_32; 781 deq_ptr_64 = tmp_32;
782 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1)); 782 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
783 deq_ptr_64 |= ((u64)tmp_32 << 32); 783 deq_ptr_64 |= ((u64)tmp_32 << 32);
784 784
785 /* we have the dma addr of next bd that will be fetched by hardware */ 785 /* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index e547ea7f56b1..1137e3384218 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1171 udc_name, fotg210); 1171 udc_name, fotg210);
1172 if (ret < 0) { 1172 if (ret < 0) {
1173 pr_err("request_irq error (%d)\n", ret); 1173 pr_err("request_irq error (%d)\n", ret);
1174 goto err_irq; 1174 goto err_req;
1175 } 1175 }
1176 1176
1177 ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget); 1177 ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
@@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev)
1183 return 0; 1183 return 0;
1184 1184
1185err_add_udc: 1185err_add_udc:
1186err_irq:
1187 free_irq(ires->start, fotg210); 1186 free_irq(ires->start, fotg210);
1188 1187
1189err_req: 1188err_req:
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..5da37c957b53 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
2167 return -ENODEV; 2167 return -ENODEV;
2168 } 2168 }
2169 2169
2170 udc->phy_regs = ioremap(r->start, resource_size(r)); 2170 udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2171 if (udc->phy_regs == NULL) { 2171 if (udc->phy_regs == NULL) {
2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2173 return -EBUSY; 2173 return -EBUSY;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
60int usb_gadget_map_request(struct usb_gadget *gadget, 60int usb_gadget_map_request(struct usb_gadget *gadget,
61 struct usb_request *req, int is_in) 61 struct usb_request *req, int is_in)
62{ 62{
63 struct device *dev = gadget->dev.parent;
64
63 if (req->length == 0) 65 if (req->length == 0)
64 return 0; 66 return 0;
65 67
66 if (req->num_sgs) { 68 if (req->num_sgs) {
67 int mapped; 69 int mapped;
68 70
69 mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, 71 mapped = dma_map_sg(dev, req->sg, req->num_sgs,
70 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 72 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
71 if (mapped == 0) { 73 if (mapped == 0) {
72 dev_err(&gadget->dev, "failed to map SGs\n"); 74 dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
75 77
76 req->num_mapped_sgs = mapped; 78 req->num_mapped_sgs = mapped;
77 } else { 79 } else {
78 req->dma = dma_map_single(&gadget->dev, req->buf, req->length, 80 req->dma = dma_map_single(dev, req->buf, req->length,
79 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 81 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
80 82
81 if (dma_mapping_error(&gadget->dev, req->dma)) { 83 if (dma_mapping_error(dev, req->dma)) {
82 dev_err(&gadget->dev, "failed to map buffer\n"); 84 dev_err(dev, "failed to map buffer\n");
83 return -EFAULT; 85 return -EFAULT;
84 } 86 }
85 } 87 }
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
95 return; 97 return;
96 98
97 if (req->num_mapped_sgs) { 99 if (req->num_mapped_sgs) {
98 dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, 100 dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
99 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 101 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
100 102
101 req->num_mapped_sgs = 0; 103 req->num_mapped_sgs = 0;
102 } else { 104 } else {
103 dma_unmap_single(&gadget->dev, req->dma, req->length, 105 dma_unmap_single(gadget->dev.parent, req->dma, req->length,
104 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 106 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
105 } 107 }
106} 108}
@@ -321,6 +323,7 @@ err4:
321 323
322err3: 324err3:
323 put_device(&udc->dev); 325 put_device(&udc->dev);
326 device_del(&gadget->dev);
324 327
325err2: 328err2:
326 put_device(&gadget->dev); 329 put_device(&gadget->dev);
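
Editorial note on the udc-core.c hunks above: the DMA mapping calls move from the gadget's own struct device to gadget->dev.parent. The gadget device is a purely logical device; its parent is the UDC controller's platform or PCI device, which is the one that carries a dma_mask and DMA ops, so mappings have to be created and error-checked against it. A minimal sketch using the real gadget/DMA APIs:

```c
/*
 * Sketch only: map against the device that actually owns the DMA
 * capabilities (the controller, i.e. the gadget device's parent),
 * and check the result against that same device.
 */
#include <linux/dma-mapping.h>
#include <linux/usb/gadget.h>

static int map_req_buf(struct usb_gadget *gadget, struct usb_request *req,
		       int is_in)
{
	struct device *dev = gadget->dev.parent;   /* the DMA-capable device */

	req->dma = dma_map_single(dev, req->buf, req->length,
				  is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, req->dma))
		return -EFAULT;
	return 0;
}
```

dma_mapping_error() must be called on the same device that performed the mapping, which is why the error check moves along with the map call.
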
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
981 int completed, modified; 981 int completed, modified;
982 __hc32 *prev; 982 __hc32 *prev;
983 983
984 /* Is this ED already invisible to the hardware? */
985 if (ed->state == ED_IDLE)
986 goto ed_idle;
987
988 /* only take off EDs that the HC isn't using, accounting for 984 /* only take off EDs that the HC isn't using, accounting for
989 * frame counter wraps and EDs with partially retired TDs 985 * frame counter wraps and EDs with partially retired TDs
990 */ 986 */
@@ -1012,12 +1008,10 @@ skip_ed:
1012 } 1008 }
1013 1009
1014 /* ED's now officially unlinked, hc doesn't see */ 1010 /* ED's now officially unlinked, hc doesn't see */
1015 ed->state = ED_IDLE;
1016 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); 1011 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1017 ed->hwNextED = 0; 1012 ed->hwNextED = 0;
1018 wmb(); 1013 wmb();
1019 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); 1014 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
1020ed_idle:
1021 1015
1022 /* reentrancy: if we drop the schedule lock, someone might 1016 /* reentrancy: if we drop the schedule lock, someone might
1023 * have modified this list. normally it's just prepending 1017 * have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
1088 if (list_empty(&ed->td_list)) { 1082 if (list_empty(&ed->td_list)) {
1089 *last = ed->ed_next; 1083 *last = ed->ed_next;
1090 ed->ed_next = NULL; 1084 ed->ed_next = NULL;
1085 ed->state = ED_IDLE;
1091 list_del(&ed->in_use_list); 1086 list_del(&ed->in_use_list);
1092 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1087 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
1093 *last = ed->ed_next; 1088 *last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
58#define CCR_PM_CKRNEN 0x0002 58#define CCR_PM_CKRNEN 0x0002
59#define CCR_PM_USBPW1 0x0004 59#define CCR_PM_USBPW1 0x0004
60#define CCR_PM_USBPW2 0x0008 60#define CCR_PM_USBPW2 0x0008
61#define CCR_PM_USBPW3 0x0008 61#define CCR_PM_USBPW3 0x0010
62#define CCR_PM_PMEE 0x0100 62#define CCR_PM_PMEE 0x0100
63#define CCR_PM_PMES 0x8000 63#define CCR_PM_PMES 0x8000
64 64
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
484 u32 pls = status_reg & PORT_PLS_MASK; 484 u32 pls = status_reg & PORT_PLS_MASK;
485 485
486 /* resume state is a xHCI internal state. 486 /* resume state is a xHCI internal state.
487 * Do not report it to usb core. 487 * Do not report it to usb core, instead, pretend to be U3,
488 * thus usb core knows it's not ready for transfer
488 */ 489 */
489 if (pls == XDEV_RESUME) 490 if (pls == XDEV_RESUME) {
491 *status |= USB_SS_PORT_LS_U3;
490 return; 492 return;
493 }
491 494
492 /* When the CAS bit is set then warm reset 495 /* When the CAS bit is set then warm reset
493 * should be performed on port 496 * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
588 status |= USB_PORT_STAT_C_RESET << 16; 591 status |= USB_PORT_STAT_C_RESET << 16;
589 /* USB3.0 only */ 592 /* USB3.0 only */
590 if (hcd->speed == HCD_USB3) { 593 if (hcd->speed == HCD_USB3) {
591 if ((raw_port_status & PORT_PLC)) 594 /* Port link change with port in resume state should not be
595 * reported to usbcore, as this is an internal state to be
596 * handled by xhci driver. Reporting PLC to usbcore may
597 * cause usbcore clearing PLC first and port change event
598 * irq won't be generated.
599 */
600 if ((raw_port_status & PORT_PLC) &&
601 (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
592 status |= USB_PORT_STAT_C_LINK_STATE << 16; 602 status |= USB_PORT_STAT_C_LINK_STATE << 16;
593 if ((raw_port_status & PORT_WRC)) 603 if ((raw_port_status & PORT_WRC))
594 status |= USB_PORT_STAT_C_BH_RESET << 16; 604 status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1120 spin_lock_irqsave(&xhci->lock, flags); 1130 spin_lock_irqsave(&xhci->lock, flags);
1121 1131
1122 if (hcd->self.root_hub->do_remote_wakeup) { 1132 if (hcd->self.root_hub->do_remote_wakeup) {
1123 if (bus_state->resuming_ports) { 1133 if (bus_state->resuming_ports || /* USB2 */
1134 bus_state->port_remote_wakeup) { /* USB3 */
1124 spin_unlock_irqrestore(&xhci->lock, flags); 1135 spin_unlock_irqrestore(&xhci->lock, flags);
1125 xhci_dbg(xhci, "suspend failed because " 1136 xhci_dbg(xhci, "suspend failed because a port is resuming\n");
1126 "a port is resuming\n");
1127 return -EBUSY; 1137 return -EBUSY;
1128 } 1138 }
1129 } 1139 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1427 /* Attempt to use the ring cache */ 1427 /* Attempt to use the ring cache */
1428 if (virt_dev->num_rings_cached == 0) 1428 if (virt_dev->num_rings_cached == 0)
1429 return -ENOMEM; 1429 return -ENOMEM;
1430 virt_dev->num_rings_cached--;
1430 virt_dev->eps[ep_index].new_ring = 1431 virt_dev->eps[ep_index].new_ring =
1431 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1432 virt_dev->ring_cache[virt_dev->num_rings_cached];
1432 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1433 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1433 virt_dev->num_rings_cached--;
1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1435 1, type); 1435 1, type);
1436 } 1436 }
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1792 int size; 1792 int size;
1793 int i, j, num_ports; 1793 int i, j, num_ports;
1794 1794
1795 del_timer_sync(&xhci->cmd_timer); 1795 if (timer_pending(&xhci->cmd_timer))
1796 del_timer_sync(&xhci->cmd_timer);
1796 1797
1797 /* Free the Event Ring Segment Table and the actual Event Ring */ 1798 /* Free the Event Ring Segment Table and the actual Event Ring */
1798 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1799 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/acpi.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
28#include "xhci-trace.h" 29#include "xhci-trace.h"
29 30
31#define PORT2_SSIC_CONFIG_REG2 0x883c
32#define PROG_DONE (1 << 30)
33#define SSIC_PORT_UNUSED (1 << 31)
34
30/* Device for a quirk */ 35/* Device for a quirk */
31#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 36#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
32#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 37#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
176} 181}
177 182
178/* 183/*
184 * In some Intel xHCI controllers, in order to get D3 working,
185 * through a vendor specific SSIC CONFIG register at offset 0x883c,
186 * SSIC PORT need to be marked as "unused" before putting xHCI
187 * into D3. After D3 exit, the SSIC port need to be marked as "used".
188 * Without this change, xHCI might not enter D3 state.
179 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear 189 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
180 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 190 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
181 */ 191 */
182static void xhci_pme_quirk(struct xhci_hcd *xhci) 192static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
183{ 193{
194 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
195 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
184 u32 val; 196 u32 val;
185 void __iomem *reg; 197 void __iomem *reg;
186 198
199 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
200 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
201
202 reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
203
204 /* Notify SSIC that SSIC profile programming is not done */
205 val = readl(reg) & ~PROG_DONE;
206 writel(val, reg);
207
208 /* Mark SSIC port as unused(suspend) or used(resume) */
209 val = readl(reg);
210 if (suspend)
211 val |= SSIC_PORT_UNUSED;
212 else
213 val &= ~SSIC_PORT_UNUSED;
214 writel(val, reg);
215
216 /* Notify SSIC that SSIC profile programming is done */
217 val = readl(reg) | PROG_DONE;
218 writel(val, reg);
219 readl(reg);
220 }
221
187 reg = (void __iomem *) xhci->cap_regs + 0x80a4; 222 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
188 val = readl(reg); 223 val = readl(reg);
189 writel(val | BIT(28), reg); 224 writel(val | BIT(28), reg);
190 readl(reg); 225 readl(reg);
191} 226}
192 227
228#ifdef CONFIG_ACPI
229static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
230{
231 static const u8 intel_dsm_uuid[] = {
232 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
233 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
234 };
235 acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
236}
237#else
238 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
239#endif /* CONFIG_ACPI */
240
193/* called during probe() after chip reset completes */ 241/* called during probe() after chip reset completes */
194static int xhci_pci_setup(struct usb_hcd *hcd) 242static int xhci_pci_setup(struct usb_hcd *hcd)
195{ 243{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
263 HCC_MAX_PSA(xhci->hcc_params) >= 4) 311 HCC_MAX_PSA(xhci->hcc_params) >= 4)
264 xhci->shared_hcd->can_do_streams = 1; 312 xhci->shared_hcd->can_do_streams = 1;
265 313
314 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
315 xhci_pme_acpi_rtd3_enable(dev);
316
266 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ 317 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
267 pm_runtime_put_noidle(&dev->dev); 318 pm_runtime_put_noidle(&dev->dev);
268 319
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
307 pdev->no_d3cold = true; 358 pdev->no_d3cold = true;
308 359
309 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 360 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
310 xhci_pme_quirk(xhci); 361 xhci_pme_quirk(hcd, true);
311 362
312 return xhci_suspend(xhci, do_wakeup); 363 return xhci_suspend(xhci, do_wakeup);
313} 364}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
340 usb_enable_intel_xhci_ports(pdev); 391 usb_enable_intel_xhci_ports(pdev);
341 392
342 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 393 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
343 xhci_pme_quirk(xhci); 394 xhci_pme_quirk(hcd, false);
344 395
345 retval = xhci_resume(xhci, hibernated); 396 retval = xhci_resume(xhci, hibernated);
346 return retval; 397 return retval;
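
Editorial note on the xhci-pci.c hunks above: the intel_dsm_uuid[] array is the packed form acpi_evaluate_dsm() takes, assuming the usual mixed-endian GUID byte layout (first three fields little-endian, trailing eight bytes in order). The stand-alone decoder below, shown purely for illustration, prints the canonical string for exactly those bytes.

```c
/*
 * Illustration only: decode the packed _DSM UUID bytes used above into
 * canonical form, assuming the usual mixed-endian GUID byte layout
 * (first three fields little-endian, last eight bytes in order).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t u[16] = {
		0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
		0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
	};

	printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
	       "%02x%02x%02x%02x%02x%02x\n",
	       u[3], u[2], u[1], u[0],     /* data1, little-endian */
	       u[5], u[4],                 /* data2, little-endian */
	       u[7], u[6],                 /* data3, little-endian */
	       u[8], u[9],                 /* data4[0..1]          */
	       u[10], u[11], u[12], u[13], u[14], u[15]);
	/* prints: ac340cb7-e901-45bf-b7e6-2b34ec931e23 */
	return 0;
}
```

Judging by the function name, evaluating this _DSM with revision 3 and function index 1 asks the platform firmware to enable runtime D3 for the controller when the XHCI_PME_STUCK_QUIRK is set.
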
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
82 return 0; 82 return 0;
83 /* offset in TRBs */ 83 /* offset in TRBs */
84 segment_offset = trb - seg->trbs; 84 segment_offset = trb - seg->trbs;
85 if (segment_offset > TRBS_PER_SEGMENT) 85 if (segment_offset >= TRBS_PER_SEGMENT)
86 return 0; 86 return 0;
87 return seg->dma + (segment_offset * sizeof(*trb)); 87 return seg->dma + (segment_offset * sizeof(*trb));
88} 88}
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
1546 usb_hcd_resume_root_hub(hcd); 1546 usb_hcd_resume_root_hub(hcd);
1547 } 1547 }
1548 1548
1549 if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1550 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1551
1549 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1552 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1550 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1553 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1551 1554
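
Editorial note on the one-character xhci_trb_virt_to_dma() change above: valid TRB offsets within a segment run from 0 to TRBS_PER_SEGMENT - 1, so an offset equal to TRBS_PER_SEGMENT already points past the segment and must be rejected, hence ">" becoming ">=". A tiny bounds-check sketch, with the TRBS_PER_SEGMENT value assumed:

```c
/*
 * Illustration of the bound: valid TRB indices within a segment are
 * 0 .. TRBS_PER_SEGMENT - 1, so an offset equal to TRBS_PER_SEGMENT
 * already points past the segment.  The value 256 is an assumption
 * matching the driver's usual definition.
 */
#include <stdbool.h>
#include <stddef.h>

#define TRBS_PER_SEGMENT 256

static bool trb_offset_in_segment(size_t segment_offset)
{
	return segment_offset < TRBS_PER_SEGMENT;   /* offsets >= are rejected */
}
```
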
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..526ebc0c7e72 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3453 return -EINVAL; 3453 return -EINVAL;
3454 } 3454 }
3455 3455
3456 if (virt_dev->tt_info)
3457 old_active_eps = virt_dev->tt_info->active_eps;
3458
3456 if (virt_dev->udev != udev) { 3459 if (virt_dev->udev != udev) {
3457 /* If the virt_dev and the udev does not match, this virt_dev 3460 /* If the virt_dev and the udev does not match, this virt_dev
3458 * may belong to another udev. 3461 * may belong to another udev.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..ed2ebf647c38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,6 +285,7 @@ struct xhci_op_regs {
285#define XDEV_U0 (0x0 << 5) 285#define XDEV_U0 (0x0 << 5)
286#define XDEV_U2 (0x2 << 5) 286#define XDEV_U2 (0x2 << 5)
287#define XDEV_U3 (0x3 << 5) 287#define XDEV_U3 (0x3 << 5)
288#define XDEV_INACTIVE (0x6 << 5)
288#define XDEV_RESUME (0xf << 5) 289#define XDEV_RESUME (0xf << 5)
289/* true: port has power (see HCC_PPC) */ 290/* true: port has power (see HCC_PPC) */
290#define PORT_POWER (1 << 9) 291#define PORT_POWER (1 << 9)
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 30842bc195f5..92d5f718659b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb)
275#ifdef CONFIG_USB_MUSB_HOST 275#ifdef CONFIG_USB_MUSB_HOST
276 return 1; 276 return 1;
277#else 277#else
278 if (musb->port_mode == MUSB_PORT_MODE_HOST) 278 return musb->port_mode == MUSB_PORT_MODE_HOST;
279 return 1;
280 return musb->g.dev.driver != NULL;
281#endif 279#endif
282} 280}
283 281
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 8f7cb068d29b..3fcc0483a081 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
217{ 217{
218 unsigned int vbus_value; 218 unsigned int vbus_value;
219 219
220 if (!mxs_phy->regmap_anatop)
221 return false;
222
220 if (mxs_phy->port_id == 0) 223 if (mxs_phy->port_id == 0)
221 regmap_read(mxs_phy->regmap_anatop, 224 regmap_read(mxs_phy->regmap_anatop,
222 ANADIG_USB1_VBUS_DET_STAT, 225 ANADIG_USB1_VBUS_DET_STAT,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ffd739e31bfc..eac7ccaa3c85 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
187 { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ 187 { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
188 { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ 188 { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
189 { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ 189 { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
190 { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
190 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 191 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
191 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 192 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
192 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ 193 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4f70df33975a..78b4f64c6b00 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock);
121static const unsigned int dummy; /* for clarity in register access fns */ 121static const unsigned int dummy; /* for clarity in register access fns */
122 122
123enum mos_regs { 123enum mos_regs {
124 THR, /* serial port regs */ 124 MOS7720_THR, /* serial port regs */
125 RHR, 125 MOS7720_RHR,
126 IER, 126 MOS7720_IER,
127 FCR, 127 MOS7720_FCR,
128 ISR, 128 MOS7720_ISR,
129 LCR, 129 MOS7720_LCR,
130 MCR, 130 MOS7720_MCR,
131 LSR, 131 MOS7720_LSR,
132 MSR, 132 MOS7720_MSR,
133 SPR, 133 MOS7720_SPR,
134 DLL, 134 MOS7720_DLL,
135 DLM, 135 MOS7720_DLM,
136 DPR, /* parallel port regs */ 136 MOS7720_DPR, /* parallel port regs */
137 DSR, 137 MOS7720_DSR,
138 DCR, 138 MOS7720_DCR,
139 ECR, 139 MOS7720_ECR,
140 SP1_REG, /* device control regs */ 140 MOS7720_SP1_REG, /* device control regs */
141 SP2_REG, /* serial port 2 (7720 only) */ 141 MOS7720_SP2_REG, /* serial port 2 (7720 only) */
142 PP_REG, 142 MOS7720_PP_REG,
143 SP_CONTROL_REG, 143 MOS7720_SP_CONTROL_REG,
144}; 144};
145 145
146/* 146/*
@@ -150,26 +150,26 @@ enum mos_regs {
150static inline __u16 get_reg_index(enum mos_regs reg) 150static inline __u16 get_reg_index(enum mos_regs reg)
151{ 151{
152 static const __u16 mos7715_index_lookup_table[] = { 152 static const __u16 mos7715_index_lookup_table[] = {
153 0x00, /* THR */ 153 0x00, /* MOS7720_THR */
154 0x00, /* RHR */ 154 0x00, /* MOS7720_RHR */
155 0x01, /* IER */ 155 0x01, /* MOS7720_IER */
156 0x02, /* FCR */ 156 0x02, /* MOS7720_FCR */
157 0x02, /* ISR */ 157 0x02, /* MOS7720_ISR */
158 0x03, /* LCR */ 158 0x03, /* MOS7720_LCR */
159 0x04, /* MCR */ 159 0x04, /* MOS7720_MCR */
160 0x05, /* LSR */ 160 0x05, /* MOS7720_LSR */
161 0x06, /* MSR */ 161 0x06, /* MOS7720_MSR */
162 0x07, /* SPR */ 162 0x07, /* MOS7720_SPR */
163 0x00, /* DLL */ 163 0x00, /* MOS7720_DLL */
164 0x01, /* DLM */ 164 0x01, /* MOS7720_DLM */
165 0x00, /* DPR */ 165 0x00, /* MOS7720_DPR */
166 0x01, /* DSR */ 166 0x01, /* MOS7720_DSR */
167 0x02, /* DCR */ 167 0x02, /* MOS7720_DCR */
168 0x0a, /* ECR */ 168 0x0a, /* MOS7720_ECR */
169 0x01, /* SP1_REG */ 169 0x01, /* MOS7720_SP1_REG */
170 0x02, /* SP2_REG (7720 only) */ 170 0x02, /* MOS7720_SP2_REG (7720 only) */
171 0x04, /* PP_REG (7715 only) */ 171 0x04, /* MOS7720_PP_REG (7715 only) */
172 0x08, /* SP_CONTROL_REG */ 172 0x08, /* MOS7720_SP_CONTROL_REG */
173 }; 173 };
174 return mos7715_index_lookup_table[reg]; 174 return mos7715_index_lookup_table[reg];
175} 175}
@@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg)
181static inline __u16 get_reg_value(enum mos_regs reg, 181static inline __u16 get_reg_value(enum mos_regs reg,
182 unsigned int serial_portnum) 182 unsigned int serial_portnum)
183{ 183{
184 if (reg >= SP1_REG) /* control reg */ 184 if (reg >= MOS7720_SP1_REG) /* control reg */
185 return 0x0000; 185 return 0x0000;
186 186
187 else if (reg >= DPR) /* parallel port reg (7715 only) */ 187 else if (reg >= MOS7720_DPR) /* parallel port reg (7715 only) */
188 return 0x0100; 188 return 0x0100;
189 189
190 else /* serial port reg */ 190 else /* serial port reg */
@@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
252 enum mos7715_pp_modes mode) 252 enum mos7715_pp_modes mode)
253{ 253{
254 mos_parport->shadowECR = mode; 254 mos_parport->shadowECR = mode;
255 write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); 255 write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
256 mos_parport->shadowECR);
256 return 0; 257 return 0;
257} 258}
258 259
@@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
486 if (parport_prologue(pp) < 0) 487 if (parport_prologue(pp) < 0)
487 return; 488 return;
488 mos7715_change_mode(mos_parport, SPP); 489 mos7715_change_mode(mos_parport, SPP);
489 write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d); 490 write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d);
490 parport_epilogue(pp); 491 parport_epilogue(pp);
491} 492}
492 493
@@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp)
497 498
498 if (parport_prologue(pp) < 0) 499 if (parport_prologue(pp) < 0)
499 return 0; 500 return 0;
500 read_mos_reg(mos_parport->serial, dummy, DPR, &d); 501 read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d);
501 parport_epilogue(pp); 502 parport_epilogue(pp);
502 return d; 503 return d;
503} 504}
@@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
510 if (parport_prologue(pp) < 0) 511 if (parport_prologue(pp) < 0)
511 return; 512 return;
512 data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0); 513 data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
513 write_mos_reg(mos_parport->serial, dummy, DCR, data); 514 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data);
514 mos_parport->shadowDCR = data; 515 mos_parport->shadowDCR = data;
515 parport_epilogue(pp); 516 parport_epilogue(pp);
516} 517}
@@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp,
543 if (parport_prologue(pp) < 0) 544 if (parport_prologue(pp) < 0)
544 return 0; 545 return 0;
545 mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val; 546 mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
546 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 547 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
548 mos_parport->shadowDCR);
547 dcr = mos_parport->shadowDCR & 0x0f; 549 dcr = mos_parport->shadowDCR & 0x0f;
548 parport_epilogue(pp); 550 parport_epilogue(pp);
549 return dcr; 551 return dcr;
@@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp)
581 return; 583 return;
582 mos7715_change_mode(mos_parport, PS2); 584 mos7715_change_mode(mos_parport, PS2);
583 mos_parport->shadowDCR &= ~0x20; 585 mos_parport->shadowDCR &= ~0x20;
584 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 586 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
587 mos_parport->shadowDCR);
585 parport_epilogue(pp); 588 parport_epilogue(pp);
586} 589}
587 590
@@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp)
593 return; 596 return;
594 mos7715_change_mode(mos_parport, PS2); 597 mos7715_change_mode(mos_parport, PS2);
595 mos_parport->shadowDCR |= 0x20; 598 mos_parport->shadowDCR |= 0x20;
596 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 599 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
600 mos_parport->shadowDCR);
597 parport_epilogue(pp); 601 parport_epilogue(pp);
598} 602}
599 603
@@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp,
633 spin_unlock(&release_lock); 637 spin_unlock(&release_lock);
634 return; 638 return;
635 } 639 }
636 write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR); 640 write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
637 write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR); 641 mos_parport->shadowDCR);
642 write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
643 mos_parport->shadowECR);
638 spin_unlock(&release_lock); 644 spin_unlock(&release_lock);
639} 645}
640 646
@@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial)
714 init_completion(&mos_parport->syncmsg_compl); 720 init_completion(&mos_parport->syncmsg_compl);
715 721
716 /* cycle parallel port reset bit */ 722 /* cycle parallel port reset bit */
717 write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80); 723 write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80);
718 write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00); 724 write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00);
719 725
720 /* initialize device registers */ 726 /* initialize device registers */
721 mos_parport->shadowDCR = DCR_INIT_VAL; 727 mos_parport->shadowDCR = DCR_INIT_VAL;
722 write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR); 728 write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
729 mos_parport->shadowDCR);
723 mos_parport->shadowECR = ECR_INIT_VAL; 730 mos_parport->shadowECR = ECR_INIT_VAL;
724 write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR); 731 write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
732 mos_parport->shadowECR);
725 733
726 /* register with parport core */ 734 /* register with parport core */
727 mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE, 735 mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
@@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
1033 /* Initialize MCS7720 -- Write Init values to corresponding Registers 1041 /* Initialize MCS7720 -- Write Init values to corresponding Registers
1034 * 1042 *
1035 * Register Index 1043 * Register Index
1036 * 0 : THR/RHR 1044 * 0 : MOS7720_THR/MOS7720_RHR
1037 * 1 : IER 1045 * 1 : MOS7720_IER
1038 * 2 : FCR 1046 * 2 : MOS7720_FCR
1039 * 3 : LCR 1047 * 3 : MOS7720_LCR
1040 * 4 : MCR 1048 * 4 : MOS7720_MCR
1041 * 5 : LSR 1049 * 5 : MOS7720_LSR
1042 * 6 : MSR 1050 * 6 : MOS7720_MSR
1043 * 7 : SPR 1051 * 7 : MOS7720_SPR
1044 * 1052 *
1045 * 0x08 : SP1/2 Control Reg 1053 * 0x08 : SP1/2 Control Reg
1046 */ 1054 */
1047 port_number = port->port_number; 1055 port_number = port->port_number;
1048 read_mos_reg(serial, port_number, LSR, &data); 1056 read_mos_reg(serial, port_number, MOS7720_LSR, &data);
1049 1057
1050 dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data); 1058 dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
1051 1059
1052 write_mos_reg(serial, dummy, SP1_REG, 0x02); 1060 write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02);
1053 write_mos_reg(serial, dummy, SP2_REG, 0x02); 1061 write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02);
1054 1062
1055 write_mos_reg(serial, port_number, IER, 0x00); 1063 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1056 write_mos_reg(serial, port_number, FCR, 0x00); 1064 write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
1057 1065
1058 write_mos_reg(serial, port_number, FCR, 0xcf); 1066 write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
1059 mos7720_port->shadowLCR = 0x03; 1067 mos7720_port->shadowLCR = 0x03;
1060 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1068 write_mos_reg(serial, port_number, MOS7720_LCR,
1069 mos7720_port->shadowLCR);
1061 mos7720_port->shadowMCR = 0x0b; 1070 mos7720_port->shadowMCR = 0x0b;
1062 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1071 write_mos_reg(serial, port_number, MOS7720_MCR,
1072 mos7720_port->shadowMCR);
1063 1073
1064 write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00); 1074 write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00);
1065 read_mos_reg(serial, dummy, SP_CONTROL_REG, &data); 1075 read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data);
1066 data = data | (port->port_number + 1); 1076 data = data | (port->port_number + 1);
1067 write_mos_reg(serial, dummy, SP_CONTROL_REG, data); 1077 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data);
1068 mos7720_port->shadowLCR = 0x83; 1078 mos7720_port->shadowLCR = 0x83;
1069 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1079 write_mos_reg(serial, port_number, MOS7720_LCR,
1070 write_mos_reg(serial, port_number, THR, 0x0c); 1080 mos7720_port->shadowLCR);
1071 write_mos_reg(serial, port_number, IER, 0x00); 1081 write_mos_reg(serial, port_number, MOS7720_THR, 0x0c);
1082 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1072 mos7720_port->shadowLCR = 0x03; 1083 mos7720_port->shadowLCR = 0x03;
1073 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1084 write_mos_reg(serial, port_number, MOS7720_LCR,
1074 write_mos_reg(serial, port_number, IER, 0x0c); 1085 mos7720_port->shadowLCR);
1086 write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
1075 1087
1076 response = usb_submit_urb(port->read_urb, GFP_KERNEL); 1088 response = usb_submit_urb(port->read_urb, GFP_KERNEL);
1077 if (response) 1089 if (response)
@@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port)
1144 usb_kill_urb(port->write_urb); 1156 usb_kill_urb(port->write_urb);
1145 usb_kill_urb(port->read_urb); 1157 usb_kill_urb(port->read_urb);
1146 1158
1147 write_mos_reg(serial, port->port_number, MCR, 0x00); 1159 write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00);
1148 write_mos_reg(serial, port->port_number, IER, 0x00); 1160 write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00);
1149 1161
1150 mos7720_port->open = 0; 1162 mos7720_port->open = 0;
1151} 1163}
@@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
1169 data = mos7720_port->shadowLCR & ~UART_LCR_SBC; 1181 data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
1170 1182
1171 mos7720_port->shadowLCR = data; 1183 mos7720_port->shadowLCR = data;
1172 write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR); 1184 write_mos_reg(serial, port->port_number, MOS7720_LCR,
1185 mos7720_port->shadowLCR);
1173} 1186}
1174 1187
1175/* 1188/*
@@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty)
1297 /* if we are implementing RTS/CTS, toggle that line */ 1310 /* if we are implementing RTS/CTS, toggle that line */
1298 if (tty->termios.c_cflag & CRTSCTS) { 1311 if (tty->termios.c_cflag & CRTSCTS) {
1299 mos7720_port->shadowMCR &= ~UART_MCR_RTS; 1312 mos7720_port->shadowMCR &= ~UART_MCR_RTS;
1300 write_mos_reg(port->serial, port->port_number, MCR, 1313 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1301 mos7720_port->shadowMCR); 1314 mos7720_port->shadowMCR);
1302 } 1315 }
1303} 1316}
@@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
1327 /* if we are implementing RTS/CTS, toggle that line */ 1340 /* if we are implementing RTS/CTS, toggle that line */
1328 if (tty->termios.c_cflag & CRTSCTS) { 1341 if (tty->termios.c_cflag & CRTSCTS) {
1329 mos7720_port->shadowMCR |= UART_MCR_RTS; 1342 mos7720_port->shadowMCR |= UART_MCR_RTS;
1330 write_mos_reg(port->serial, port->port_number, MCR, 1343 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1331 mos7720_port->shadowMCR); 1344 mos7720_port->shadowMCR);
1332 } 1345 }
1333} 1346}
@@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
1352 dev_dbg(&port->dev, "Sending Setting Commands ..........\n"); 1365 dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
1353 port_number = port->port_number; 1366 port_number = port->port_number;
1354 1367
1355 write_mos_reg(serial, port_number, IER, 0x00); 1368 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1356 write_mos_reg(serial, port_number, FCR, 0x00); 1369 write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
1357 write_mos_reg(serial, port_number, FCR, 0xcf); 1370 write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
1358 mos7720_port->shadowMCR = 0x0b; 1371 mos7720_port->shadowMCR = 0x0b;
1359 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1372 write_mos_reg(serial, port_number, MOS7720_MCR,
1360 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00); 1373 mos7720_port->shadowMCR);
1374 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00);
1361 1375
1362 /*********************************************** 1376 /***********************************************
1363 * Set for higher rates * 1377 * Set for higher rates *
1364 ***********************************************/ 1378 ***********************************************/
1365 /* writing baud rate verbatim into uart clock field clearly not right */ 1379 /* writing baud rate verbatim into uart clock field clearly not right */
1366 if (port_number == 0) 1380 if (port_number == 0)
1367 sp_reg = SP1_REG; 1381 sp_reg = MOS7720_SP1_REG;
1368 else 1382 else
1369 sp_reg = SP2_REG; 1383 sp_reg = MOS7720_SP2_REG;
1370 write_mos_reg(serial, dummy, sp_reg, baud * 0x10); 1384 write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
1371 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03); 1385 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03);
1372 mos7720_port->shadowMCR = 0x2b; 1386 mos7720_port->shadowMCR = 0x2b;
1373 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1387 write_mos_reg(serial, port_number, MOS7720_MCR,
1388 mos7720_port->shadowMCR);
1374 1389
1375 /*********************************************** 1390 /***********************************************
1376 * Set DLL/DLM 1391 * Set DLL/DLM
1377 ***********************************************/ 1392 ***********************************************/
1378 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; 1393 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
1379 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1394 write_mos_reg(serial, port_number, MOS7720_LCR,
1380 write_mos_reg(serial, port_number, DLL, 0x01); 1395 mos7720_port->shadowLCR);
1381 write_mos_reg(serial, port_number, DLM, 0x00); 1396 write_mos_reg(serial, port_number, MOS7720_DLL, 0x01);
1397 write_mos_reg(serial, port_number, MOS7720_DLM, 0x00);
1382 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; 1398 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
1383 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1399 write_mos_reg(serial, port_number, MOS7720_LCR,
1400 mos7720_port->shadowLCR);
1384 1401
1385 return 0; 1402 return 0;
1386} 1403}
@@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
1488 1505
1489 /* Enable access to divisor latch */ 1506 /* Enable access to divisor latch */
1490 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB; 1507 mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
1491 write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); 1508 write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
1492 1509
1493 /* Write the divisor */ 1510 /* Write the divisor */
1494 write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff)); 1511 write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff));
1495 write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8)); 1512 write_mos_reg(serial, number, MOS7720_DLM,
1513 (__u8)((divisor & 0xff00) >> 8));
1496 1514
1497 /* Disable access to divisor latch */ 1515 /* Disable access to divisor latch */
1498 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB; 1516 mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
1499 write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR); 1517 write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
1500 1518
1501 return status; 1519 return status;
1502} 1520}
@@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty,
1600 1618
1601 1619
1602 /* Disable Interrupts */ 1620 /* Disable Interrupts */
1603 write_mos_reg(serial, port_number, IER, 0x00); 1621 write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
1604 write_mos_reg(serial, port_number, FCR, 0x00); 1622 write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
1605 write_mos_reg(serial, port_number, FCR, 0xcf); 1623 write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
1606 1624
1607 /* Send the updated LCR value to the mos7720 */ 1625 /* Send the updated LCR value to the mos7720 */
1608 write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR); 1626 write_mos_reg(serial, port_number, MOS7720_LCR,
1627 mos7720_port->shadowLCR);
1609 mos7720_port->shadowMCR = 0x0b; 1628 mos7720_port->shadowMCR = 0x0b;
1610 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1629 write_mos_reg(serial, port_number, MOS7720_MCR,
1630 mos7720_port->shadowMCR);
1611 1631
1612 /* set up the MCR register and send it to the mos7720 */ 1632 /* set up the MCR register and send it to the mos7720 */
1613 mos7720_port->shadowMCR = UART_MCR_OUT2; 1633 mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty,
1619 /* To set hardware flow control to the specified * 1639 /* To set hardware flow control to the specified *
1620 * serial port, in SP1/2_CONTROL_REG */ 1640 * serial port, in SP1/2_CONTROL_REG */
1621 if (port_number) 1641 if (port_number)
1622 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); 1642 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
1643 0x01);
1623 else 1644 else
1624 write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); 1645 write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
1646 0x02);
1625 1647
1626 } else 1648 } else
1627 mos7720_port->shadowMCR &= ~(UART_MCR_XONANY); 1649 mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
1628 1650
1629 write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR); 1651 write_mos_reg(serial, port_number, MOS7720_MCR,
1652 mos7720_port->shadowMCR);
1630 1653
1631 /* Determine divisor based on baud rate */ 1654 /* Determine divisor based on baud rate */
1632 baud = tty_get_baud_rate(tty); 1655 baud = tty_get_baud_rate(tty);
@@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty,
1639 if (baud >= 230400) { 1662 if (baud >= 230400) {
1640 set_higher_rates(mos7720_port, baud); 1663 set_higher_rates(mos7720_port, baud);
1641 /* Enable Interrupts */ 1664 /* Enable Interrupts */
1642 write_mos_reg(serial, port_number, IER, 0x0c); 1665 write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
1643 return; 1666 return;
1644 } 1667 }
1645 1668
@@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty,
1650 if (cflag & CBAUD) 1673 if (cflag & CBAUD)
1651 tty_encode_baud_rate(tty, baud, baud); 1674 tty_encode_baud_rate(tty, baud, baud);
1652 /* Enable Interrupts */ 1675 /* Enable Interrupts */
1653 write_mos_reg(serial, port_number, IER, 0x0c); 1676 write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
1654 1677
1655 if (port->read_urb->status != -EINPROGRESS) { 1678 if (port->read_urb->status != -EINPROGRESS) {
1656 status = usb_submit_urb(port->read_urb, GFP_KERNEL); 1679 status = usb_submit_urb(port->read_urb, GFP_KERNEL);
@@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty,
1725 1748
1726 count = mos7720_chars_in_buffer(tty); 1749 count = mos7720_chars_in_buffer(tty);
1727 if (count == 0) { 1750 if (count == 0) {
1728 read_mos_reg(port->serial, port_number, LSR, &data); 1751 read_mos_reg(port->serial, port_number, MOS7720_LSR, &data);
1729 if ((data & (UART_LSR_TEMT | UART_LSR_THRE)) 1752 if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
1730 == (UART_LSR_TEMT | UART_LSR_THRE)) { 1753 == (UART_LSR_TEMT | UART_LSR_THRE)) {
1731 dev_dbg(&port->dev, "%s -- Empty\n", __func__); 1754 dev_dbg(&port->dev, "%s -- Empty\n", __func__);
@@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty,
1782 mcr &= ~UART_MCR_LOOP; 1805 mcr &= ~UART_MCR_LOOP;
1783 1806
1784 mos7720_port->shadowMCR = mcr; 1807 mos7720_port->shadowMCR = mcr;
1785 write_mos_reg(port->serial, port->port_number, MCR, 1808 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1786 mos7720_port->shadowMCR); 1809 mos7720_port->shadowMCR);
1787 1810
1788 return 0; 1811 return 0;
@@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
1827 } 1850 }
1828 1851
1829 mos7720_port->shadowMCR = mcr; 1852 mos7720_port->shadowMCR = mcr;
1830 write_mos_reg(port->serial, port->port_number, MCR, 1853 write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
1831 mos7720_port->shadowMCR); 1854 mos7720_port->shadowMCR);
1832 1855
1833 return 0; 1856 return 0;
@@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial)
1942 } 1965 }
1943#endif 1966#endif
1944 /* LSR For Port 1 */ 1967 /* LSR For Port 1 */
1945 read_mos_reg(serial, 0, LSR, &data); 1968 read_mos_reg(serial, 0, MOS7720_LSR, &data);
1946 dev_dbg(&dev->dev, "LSR:%x\n", data); 1969 dev_dbg(&dev->dev, "LSR:%x\n", data);
1947 1970
1948 return 0; 1971 return 0;
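The mos7720.c change is mechanical: every member of enum mos_regs gains a MOS7720_ prefix so the short names (THR, IER, LSR, ...) cannot collide with identically named identifiers elsewhere in the kernel, and every caller is updated to match. A minimal sketch of the pattern, with invented names:

/* Hypothetical sketch of driver-prefixed register names feeding a
 * lookup table, mirroring the structure of the patch above. */
enum foo_regs {
	FOO_THR,	/* was THR */
	FOO_IER,	/* was IER */
	FOO_LSR,	/* was LSR */
};

static inline unsigned int foo_reg_index(enum foo_regs reg)
{
	static const unsigned int index[] = {
		[FOO_THR] = 0x00,
		[FOO_IER] = 0x01,
		[FOO_LSR] = 0x05,
	};

	return index[reg];
}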
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index f0c0c53359ad..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1100 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), 1100 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
1101 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ 1101 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
1102 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
1103 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
1102 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1105 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1106 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1765,6 +1767,7 @@ static const struct usb_device_id option_ids[] = {
1765 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, 1767 { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
1766 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1768 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1767 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1769 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1770 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1768 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1771 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1769 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1772 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1770 { } /* Terminating entry */ 1773 { } /* Terminating entry */
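The new option.c entries match on interface class 0xff and attach quirk data through .driver_info. The real structure behind sierra_mc73xx_blacklist is not shown in this hunk; the sketch below uses an invented struct and illustrative values purely to show the mechanism the entries rely on:

#include <linux/bitops.h>
#include <linux/usb.h>

/* Invented quirk structure; only the .driver_info plumbing corresponds
 * to what the hunk above does. */
struct example_quirk_info {
	unsigned long reserved_ifaces;	/* interfaces the driver must skip */
};

static const struct example_quirk_info example_mc73xx_quirk = {
	.reserved_ifaces = BIT(8) | BIT(10),	/* illustrative values */
};

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x1199, 0x9041, 0xff),
	  .driver_info = (kernel_ulong_t)&example_mc73xx_quirk },
	{ }
};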
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ 145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ 146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ 147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
148 {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
149 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */ 148 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
150 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */ 149 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
151 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */ 150 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
158 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 157 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
159 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 158 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
160 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 159 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
160 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
161 161
162 /* Huawei devices */ 162 /* Huawei devices */
163 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 163 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
289 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), 289 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
290 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 290 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
291 }, 291 },
292 { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
292 /* AT&T Direct IP LTE modems */ 293 /* AT&T Direct IP LTE modems */
293 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), 294 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
294 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 295 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 529066bbc7e8..46f1f13b41f1 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
1306 tty_unregister_driver(usb_serial_tty_driver); 1306 tty_unregister_driver(usb_serial_tty_driver);
1307 put_tty_driver(usb_serial_tty_driver); 1307 put_tty_driver(usb_serial_tty_driver);
1308 bus_unregister(&usb_serial_bus_type); 1308 bus_unregister(&usb_serial_bus_type);
1309 idr_destroy(&serial_minors);
1309} 1310}
1310 1311
1311 1312
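usb-serial.c gains an idr_destroy() call so the minor-number IDR releases its internal memory when the module unloads. A generic sketch of that lifecycle, with invented names:

#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDR(example_minors);

/* Allocate an id in [0, 255] that maps back to @port. */
static int example_get_minor(void *port)
{
	return idr_alloc(&example_minors, port, 0, 256, GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	idr_remove(&example_minors, minor);
}

static void example_exit_cleanup(void)
{
	/* Frees the IDR's remaining internal nodes, as the hunk adds. */
	idr_destroy(&example_minors);
}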
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2066 US_FL_NO_READ_DISC_INFO ), 2066 US_FL_NO_READ_DISC_INFO ),
2067 2067
2068/* Reported by Oliver Neukum <oneukum@suse.com>
2069 * This device morphs spontaneously into another device if the access
2070 * pattern of Windows isn't followed. Thus writable media would be dirty
2071 * if the initial instance is used. So the device is limited to its
2072 * virtual CD.
2073 * And yes, the concept that BCD goes up to 9 is not heeded */
2074UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
2075 "ZTE,Incorporated",
2076 "ZTE WCDMA Technologies MSM",
2077 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2078 US_FL_SINGLE_LUN ),
2079
2068/* Reported by Sven Geggus <sven-usbst@geggus.net> 2080/* Reported by Sven Geggus <sven-usbst@geggus.net>
2069 * This encrypted pen drive returns bogus data for the initial READ(10). 2081 * This encrypted pen drive returns bogus data for the initial READ(10).
2070 */ 2082 */
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
2074 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2086 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2075 US_FL_INITIAL_READ10 ), 2087 US_FL_INITIAL_READ10 ),
2076 2088
2089/* Reported by Hans de Goede <hdegoede@redhat.com>
2090 * These are mini projectors using USB for both power and video data transport
2091 * The usb-storage interface is a virtual windows driver CD, which the gm12u320
2092 * driver automatically converts into framebuffer & kms dri device nodes.
2093 */
2094UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
2095 "Grain-media Technology Corp.",
2096 "USB3.0 Device GM12U320",
2097 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2098 US_FL_IGNORE_DEVICE ),
2099
2077/* Patch by Richard Schütz <r.schtz@t-online.de> 2100/* Patch by Richard Schütz <r.schtz@t-online.de>
2078 * This external hard drive enclosure uses a JMicron chip which 2101 * This external hard drive enclosure uses a JMicron chip which
2079 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ 2102 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
689} 689}
690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); 690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
691 691
692static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
693 char *buf)
694{
695 struct vfio_device *device;
696
697 mutex_lock(&group->device_lock);
698 list_for_each_entry(device, &group->device_list, group_next) {
699 if (!strcmp(dev_name(device->dev), buf)) {
700 vfio_device_get(device);
701 break;
702 }
703 }
704 mutex_unlock(&group->device_lock);
705
706 return device;
707}
708
692/* 709/*
693 * Caller must hold a reference to the vfio_device 710 * Caller must hold a reference to the vfio_device
694 */ 711 */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1198{ 1215{
1199 struct vfio_device *device; 1216 struct vfio_device *device;
1200 struct file *filep; 1217 struct file *filep;
1201 int ret = -ENODEV; 1218 int ret;
1202 1219
1203 if (0 == atomic_read(&group->container_users) || 1220 if (0 == atomic_read(&group->container_users) ||
1204 !group->container->iommu_driver || !vfio_group_viable(group)) 1221 !group->container->iommu_driver || !vfio_group_viable(group))
1205 return -EINVAL; 1222 return -EINVAL;
1206 1223
1207 mutex_lock(&group->device_lock); 1224 device = vfio_device_get_from_name(group, buf);
1208 list_for_each_entry(device, &group->device_list, group_next) { 1225 if (!device)
1209 if (strcmp(dev_name(device->dev), buf)) 1226 return -ENODEV;
1210 continue;
1211 1227
1212 ret = device->ops->open(device->device_data); 1228 ret = device->ops->open(device->device_data);
1213 if (ret) 1229 if (ret) {
1214 break; 1230 vfio_device_put(device);
1215 /* 1231 return ret;
1216 * We can't use anon_inode_getfd() because we need to modify 1232 }
1217 * the f_mode flags directly to allow more than just ioctls
1218 */
1219 ret = get_unused_fd_flags(O_CLOEXEC);
1220 if (ret < 0) {
1221 device->ops->release(device->device_data);
1222 break;
1223 }
1224 1233
1225 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, 1234 /*
1226 device, O_RDWR); 1235 * We can't use anon_inode_getfd() because we need to modify
1227 if (IS_ERR(filep)) { 1236 * the f_mode flags directly to allow more than just ioctls
1228 put_unused_fd(ret); 1237 */
1229 ret = PTR_ERR(filep); 1238 ret = get_unused_fd_flags(O_CLOEXEC);
1230 device->ops->release(device->device_data); 1239 if (ret < 0) {
1231 break; 1240 device->ops->release(device->device_data);
1232 } 1241 vfio_device_put(device);
1242 return ret;
1243 }
1233 1244
1234 /* 1245 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
1235 * TODO: add an anon_inode interface to do this. 1246 device, O_RDWR);
1236 * Appears to be missing by lack of need rather than 1247 if (IS_ERR(filep)) {
1237 * explicitly prevented. Now there's need. 1248 put_unused_fd(ret);
1238 */ 1249 ret = PTR_ERR(filep);
1239 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); 1250 device->ops->release(device->device_data);
1251 vfio_device_put(device);
1252 return ret;
1253 }
1254
1255 /*
1256 * TODO: add an anon_inode interface to do this.
1257 * Appears to be missing by lack of need rather than
1258 * explicitly prevented. Now there's need.
1259 */
1260 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1240 1261
1241 vfio_device_get(device); 1262 atomic_inc(&group->container_users);
1242 atomic_inc(&group->container_users);
1243 1263
1244 fd_install(ret, filep); 1264 fd_install(ret, filep);
1245 break;
1246 }
1247 mutex_unlock(&group->device_lock);
1248 1265
1249 return ret; 1266 return ret;
1250} 1267}
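The vfio rework factors the name lookup into vfio_device_get_from_name(), which takes the device reference while group->device_lock is held, so the open and fd setup can run after the lock is dropped. A hedged, generic sketch of that "reference under the lock, work outside it" pattern; all names here are invented:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct example_obj {
	struct list_head node;
	struct kref ref;
	const char *name;
};

static struct example_obj *example_get_by_name(struct list_head *head,
					       struct mutex *lock,
					       const char *name)
{
	struct example_obj *obj, *found = NULL;

	mutex_lock(lock);
	list_for_each_entry(obj, head, node) {
		if (!strcmp(obj->name, name)) {
			kref_get(&obj->ref);	/* pin it before unlocking */
			found = obj;
			break;
		}
	}
	mutex_unlock(lock);

	return found;	/* caller does the blocking work, then drops the ref */
}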
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
22#include <linux/file.h> 22#include <linux/file.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/vmalloc.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
26#include <linux/cgroup.h> 27#include <linux/cgroup.h>
27#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sort.h>
28 30
29#include "vhost.h" 31#include "vhost.h"
30 32
33static ushort max_mem_regions = 64;
34module_param(max_mem_regions, ushort, 0444);
35MODULE_PARM_DESC(max_mem_regions,
36 "Maximum number of memory regions in memory map. (default: 64)");
37
31enum { 38enum {
32 VHOST_MEMORY_MAX_NREGIONS = 64,
33 VHOST_MEMORY_F_LOG = 0x1, 39 VHOST_MEMORY_F_LOG = 0x1,
34}; 40};
35 41
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
543 fput(dev->log_file); 549 fput(dev->log_file);
544 dev->log_file = NULL; 550 dev->log_file = NULL;
545 /* No one will access memory at this point */ 551 /* No one will access memory at this point */
546 kfree(dev->memory); 552 kvfree(dev->memory);
547 dev->memory = NULL; 553 dev->memory = NULL;
548 WARN_ON(!list_empty(&dev->work_list)); 554 WARN_ON(!list_empty(&dev->work_list));
549 if (dev->worker) { 555 if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
663} 669}
664EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 670EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
665 671
672static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
673{
674 const struct vhost_memory_region *r1 = p1, *r2 = p2;
675 if (r1->guest_phys_addr < r2->guest_phys_addr)
676 return 1;
677 if (r1->guest_phys_addr > r2->guest_phys_addr)
678 return -1;
679 return 0;
680}
681
682static void *vhost_kvzalloc(unsigned long size)
683{
684 void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
685
686 if (!n)
687 n = vzalloc(size);
688 return n;
689}
690
666static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 691static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
667{ 692{
668 struct vhost_memory mem, *newmem, *oldmem; 693 struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
673 return -EFAULT; 698 return -EFAULT;
674 if (mem.padding) 699 if (mem.padding)
675 return -EOPNOTSUPP; 700 return -EOPNOTSUPP;
676 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) 701 if (mem.nregions > max_mem_regions)
677 return -E2BIG; 702 return -E2BIG;
678 newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); 703 newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
679 if (!newmem) 704 if (!newmem)
680 return -ENOMEM; 705 return -ENOMEM;
681 706
682 memcpy(newmem, &mem, size); 707 memcpy(newmem, &mem, size);
683 if (copy_from_user(newmem->regions, m->regions, 708 if (copy_from_user(newmem->regions, m->regions,
684 mem.nregions * sizeof *m->regions)) { 709 mem.nregions * sizeof *m->regions)) {
685 kfree(newmem); 710 kvfree(newmem);
686 return -EFAULT; 711 return -EFAULT;
687 } 712 }
713 sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
714 vhost_memory_reg_sort_cmp, NULL);
688 715
689 if (!memory_access_ok(d, newmem, 0)) { 716 if (!memory_access_ok(d, newmem, 0)) {
690 kfree(newmem); 717 kvfree(newmem);
691 return -EFAULT; 718 return -EFAULT;
692 } 719 }
693 oldmem = d->memory; 720 oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
699 d->vqs[i]->memory = newmem; 726 d->vqs[i]->memory = newmem;
700 mutex_unlock(&d->vqs[i]->mutex); 727 mutex_unlock(&d->vqs[i]->mutex);
701 } 728 }
702 kfree(oldmem); 729 kvfree(oldmem);
703 return 0; 730 return 0;
704} 731}
705 732
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
965 } 992 }
966 if (eventfp != d->log_file) { 993 if (eventfp != d->log_file) {
967 filep = d->log_file; 994 filep = d->log_file;
995 d->log_file = eventfp;
968 ctx = d->log_ctx; 996 ctx = d->log_ctx;
969 d->log_ctx = eventfp ? 997 d->log_ctx = eventfp ?
970 eventfd_ctx_fileget(eventfp) : NULL; 998 eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
992static const struct vhost_memory_region *find_region(struct vhost_memory *mem, 1020static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
993 __u64 addr, __u32 len) 1021 __u64 addr, __u32 len)
994{ 1022{
995 struct vhost_memory_region *reg; 1023 const struct vhost_memory_region *reg;
996 int i; 1024 int start = 0, end = mem->nregions;
997 1025
998 /* linear search is not brilliant, but we really have on the order of 6 1026 while (start < end) {
999 * regions in practice */ 1027 int slot = start + (end - start) / 2;
1000 for (i = 0; i < mem->nregions; ++i) { 1028 reg = mem->regions + slot;
1001 reg = mem->regions + i; 1029 if (addr >= reg->guest_phys_addr)
1002 if (reg->guest_phys_addr <= addr && 1030 end = slot;
1003 reg->guest_phys_addr + reg->memory_size - 1 >= addr) 1031 else
1004 return reg; 1032 start = slot + 1;
1005 } 1033 }
1034
1035 reg = mem->regions + start;
1036 if (addr >= reg->guest_phys_addr &&
1037 reg->guest_phys_addr + reg->memory_size > addr)
1038 return reg;
1006 return NULL; 1039 return NULL;
1007} 1040}
1008 1041
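The reworked find_region() relies on the regions array being kept sorted by descending guest_phys_addr (see vhost_memory_reg_sort_cmp above) and binary-searches for the first region whose start is at or below the address, then range-checks it. A standalone sketch of the same lookup outside any kernel context:

#include <stddef.h>
#include <stdint.h>

struct region {
	uint64_t start;	/* guest physical start address */
	uint64_t size;
};

/* regions[] must be sorted by descending .start, as in the patch. */
static const struct region *example_find_region(const struct region *regions,
						size_t nregions, uint64_t addr)
{
	size_t lo = 0, hi = nregions;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (addr >= regions[mid].start)
			hi = mid;	/* this region may contain addr, keep looking left */
		else
			lo = mid + 1;	/* this region starts above addr, look right */
	}

	if (lo < nregions &&
	    addr >= regions[lo].start &&
	    addr - regions[lo].start < regions[lo].size)
		return &regions[lo];

	return NULL;
}

The extra lo < nregions test guards the empty-table and not-found cases; the kernel version performs the equivalent range check on the slot the search lands on.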
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
1306 int y; 1306 int y;
1307 int c = scr_readw((u16 *) vc->vc_pos); 1307 int c = scr_readw((u16 *) vc->vc_pos);
1308 1308
1309 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1310
1309 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) 1311 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
1310 return; 1312 return;
1311 1313
1312 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1313 if (vc->vc_cursor_type & 0x10) 1314 if (vc->vc_cursor_type & 0x10)
1314 fbcon_del_cursor_timer(info); 1315 fbcon_del_cursor_timer(info);
1315 else 1316 else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
298 298
299# Helper logic selected only by the ARM Versatile platform family. 299# Helper logic selected only by the ARM Versatile platform family.
300config PLAT_VERSATILE_CLCD 300config PLAT_VERSATILE_CLCD
301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS 301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
302 depends on ARM 302 depends on ARM
303 depends on FB_ARMCLCD && FB=y 303 depends on FB_ARMCLCD && FB=y
304 304
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
60 } 60 }
61 prev = port; 61 prev = port;
62 } while (of_node_cmp(port->name, "port") != 0); 62 } while (of_node_cmp(port->name, "port") != 0);
63
64 of_node_put(ports);
63 } 65 }
64 66
65 return port; 67 return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
94 if (!port) 96 if (!port)
95 return NULL; 97 return NULL;
96 98
97 np = of_get_next_parent(port); 99 np = of_get_parent(port);
98 100
99 for (i = 0; i < 2 && np; ++i) { 101 for (i = 0; i < 2 && np; ++i) {
100 struct property *prop; 102 struct property *prop;
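The dss-of fix swaps of_get_next_parent(), which drops the child's reference, for of_get_parent(), which leaves it alone, and adds the missing of_node_put(ports). A short, purely illustrative sketch of the reference rules these helpers follow:

#include <linux/of.h>

/* Illustrative only: walk up two levels without disturbing the
 * caller's reference on @port, releasing what we took ourselves. */
static struct device_node *example_grandparent(struct device_node *port)
{
	struct device_node *parent, *grandparent;

	parent = of_get_parent(port);		/* takes a ref on the parent only */
	if (!parent)
		return NULL;

	grandparent = of_get_parent(parent);
	of_node_put(parent);			/* done with the middle node */

	return grandparent;			/* caller must of_node_put() it */
}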
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
653 goto err_free_dma; 653 goto err_free_dma;
654 } 654 }
655 655
656 ret = clk_enable(priv->clk); 656 ret = clk_prepare_enable(priv->clk);
657 if (ret < 0) { 657 if (ret < 0) {
658 dev_err(dev, "failed to enable clock\n"); 658 dev_err(dev, "failed to enable clock\n");
659 goto err_misc_deregister; 659 goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
685 misc_deregister(&priv->misc_dev); 685 misc_deregister(&priv->misc_dev);
686 686
687err_disable_clk: 687err_disable_clk:
688 clk_disable(priv->clk); 688 clk_disable_unprepare(priv->clk);
689 689
690 return ret; 690 return ret;
691} 691}
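The pxa3xx-gcu change moves to the prepare-aware clock calls: with the common clock framework a clock must be prepared (which may sleep) before it can be enabled, and clk_prepare_enable()/clk_disable_unprepare() do both steps. A minimal probe-path sketch with invented names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_prepare_enable() == clk_prepare() + clk_enable() */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* ... device setup; on failure call clk_disable_unprepare(clk) ... */
	return 0;
}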
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 86621fabbb8b..735355b0e023 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -121,6 +121,7 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
121#define REG_3 0x0004a0 121#define REG_3 0x0004a0
122#define REG_4 0x000600 122#define REG_4 0x000600
123#define REG_6 0x000800 123#define REG_6 0x000800
124#define REG_7 0x000804
124#define REG_8 0x000820 125#define REG_8 0x000820
125#define REG_9 0x000a04 126#define REG_9 0x000a04
126#define REG_10 0x018000 127#define REG_10 0x018000
@@ -135,6 +136,8 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
135#define REG_21 0x200218 136#define REG_21 0x200218
136#define REG_22 0x0005a0 137#define REG_22 0x0005a0
137#define REG_23 0x0005c0 138#define REG_23 0x0005c0
139#define REG_24 0x000808
140#define REG_25 0x000b00
138#define REG_26 0x200118 141#define REG_26 0x200118
139#define REG_27 0x200308 142#define REG_27 0x200308
140#define REG_32 0x21003c 143#define REG_32 0x21003c
@@ -429,6 +432,9 @@ ARTIST_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
429#define SET_LENXY_START_RECFILL(fb, lenxy) \ 432#define SET_LENXY_START_RECFILL(fb, lenxy) \
430 WRITE_WORD(lenxy, fb, REG_9) 433 WRITE_WORD(lenxy, fb, REG_9)
431 434
435#define SETUP_COPYAREA(fb) \
436 WRITE_BYTE(0, fb, REG_16b1)
437
432static void 438static void
433HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable) 439HYPER_ENABLE_DISABLE_DISPLAY(struct stifb_info *fb, int enable)
434{ 440{
@@ -1004,6 +1010,36 @@ stifb_blank(int blank_mode, struct fb_info *info)
1004 return 0; 1010 return 0;
1005} 1011}
1006 1012
1013static void
1014stifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
1015{
1016 struct stifb_info *fb = container_of(info, struct stifb_info, info);
1017
1018 SETUP_COPYAREA(fb);
1019
1020 SETUP_HW(fb);
1021 if (fb->info.var.bits_per_pixel == 32) {
1022 WRITE_WORD(0xBBA0A000, fb, REG_10);
1023
1024 NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xffffffff);
1025 } else {
1026 WRITE_WORD(fb->id == S9000_ID_HCRX ? 0x13a02000 : 0x13a01000, fb, REG_10);
1027
1028 NGLE_REALLY_SET_IMAGE_PLANEMASK(fb, 0xff);
1029 }
1030
1031 NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
1032 IBOvals(RopSrc, MaskAddrOffset(0),
1033 BitmapExtent08, StaticReg(1),
1034 DataDynamic, MaskOtc, BGx(0), FGx(0)));
1035
1036 WRITE_WORD(((area->sx << 16) | area->sy), fb, REG_24);
1037 WRITE_WORD(((area->width << 16) | area->height), fb, REG_7);
1038 WRITE_WORD(((area->dx << 16) | area->dy), fb, REG_25);
1039
1040 SETUP_FB(fb);
1041}
1042
1007static void __init 1043static void __init
1008stifb_init_display(struct stifb_info *fb) 1044stifb_init_display(struct stifb_info *fb)
1009{ 1045{
@@ -1069,7 +1105,7 @@ static struct fb_ops stifb_ops = {
1069 .fb_setcolreg = stifb_setcolreg, 1105 .fb_setcolreg = stifb_setcolreg,
1070 .fb_blank = stifb_blank, 1106 .fb_blank = stifb_blank,
1071 .fb_fillrect = cfb_fillrect, 1107 .fb_fillrect = cfb_fillrect,
1072 .fb_copyarea = cfb_copyarea, 1108 .fb_copyarea = stifb_copyarea,
1073 .fb_imageblit = cfb_imageblit, 1109 .fb_imageblit = cfb_imageblit,
1074}; 1110};
1075 1111
@@ -1258,7 +1294,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
1258 info->fbops = &stifb_ops; 1294 info->fbops = &stifb_ops;
1259 info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len); 1295 info->screen_base = ioremap_nocache(REGION_BASE(fb,1), fix->smem_len);
1260 info->screen_size = fix->smem_len; 1296 info->screen_size = fix->smem_len;
1261 info->flags = FBINFO_DEFAULT; 1297 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA;
1262 info->pseudo_palette = &fb->pseudo_palette; 1298 info->pseudo_palette = &fb->pseudo_palette;
1263 1299
1264 /* This has to be done !!! */ 1300 /* This has to be done !!! */
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
44 index = disp->native_mode; 44 index = disp->native_mode;
45 45
46 ret = videomode_from_timings(disp, vm, index); 46 ret = videomode_from_timings(disp, vm, index);
47 if (ret)
48 return ret;
49 47
50 display_timings_release(disp); 48 display_timings_release(disp);
51 49
52 return 0; 50 return ret;
53} 51}
54EXPORT_SYMBOL_GPL(of_get_videomode); 52EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
313static void virtinput_remove(struct virtio_device *vdev) 313static void virtinput_remove(struct virtio_device *vdev)
314{ 314{
315 struct virtio_input *vi = vdev->priv; 315 struct virtio_input *vi = vdev->priv;
316 void *buf;
316 unsigned long flags; 317 unsigned long flags;
317 318
318 spin_lock_irqsave(&vi->lock, flags); 319 spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
320 spin_unlock_irqrestore(&vi->lock, flags); 321 spin_unlock_irqrestore(&vi->lock, flags);
321 322
322 input_unregister_device(vi->idev); 323 input_unregister_device(vi->idev);
324 vdev->config->reset(vdev);
325 while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
326 kfree(buf);
323 vdev->config->del_vqs(vdev); 327 vdev->config->del_vqs(vdev);
324 kfree(vi); 328 kfree(vi);
325} 329}
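virtinput_remove() now resets the device and drains any buffers still queued on the status virtqueue before deleting the vqs, so those allocations are not leaked. A hedged sketch of that teardown ordering; the helper and its arguments are invented:

#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* Sketch: remove-time teardown for a driver that queues kmalloc'ed
 * buffers on one virtqueue. */
static void example_remove(struct virtio_device *vdev, struct virtqueue *vq)
{
	void *buf;

	vdev->config->reset(vdev);		/* stop the device first */

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);			/* reclaim still-queued buffers */

	vdev->config->del_vqs(vdev);		/* then tear down the vqs */
}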
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index c1b03f4235b9..4e7fec36f5c3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
4 * Watchdog driver for ARM SP805 watchdog module 4 * Watchdog driver for ARM SP805 watchdog module
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2 or later. This program is licensed "as is" without any 10 * License version 2 or later. This program is licensed "as is" without any
@@ -303,6 +303,6 @@ static struct amba_driver sp805_wdt_driver = {
303 303
304module_amba_driver(sp805_wdt_driver); 304module_amba_driver(sp805_wdt_driver);
305 305
306MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); 306MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
307MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); 307MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
308MODULE_LICENSE("GPL"); 308MODULE_LICENSE("GPL");
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
472} 472}
473 473
474/* 474/*
475 * We avoid multiple worker processes conflicting via the balloon mutex. 475 * As this is a work item it is guaranteed to run as a single instance only.
476 * We may of course race updates of the target counts (which are protected 476 * We may of course race updates of the target counts (which are protected
477 * by the balloon lock), or with changes to the Xen hard limit, but we will 477 * by the balloon lock), or with changes to the Xen hard limit, but we will
478 * recover from these in time. 478 * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
482 enum bp_state state = BP_DONE; 482 enum bp_state state = BP_DONE;
483 long credit; 483 long credit;
484 484
485 mutex_lock(&balloon_mutex);
486 485
487 do { 486 do {
487 mutex_lock(&balloon_mutex);
488
488 credit = current_credit(); 489 credit = current_credit();
489 490
490 if (credit > 0) { 491 if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
499 500
500 state = update_schedule(state); 501 state = update_schedule(state);
501 502
502#ifndef CONFIG_PREEMPT 503 mutex_unlock(&balloon_mutex);
503 if (need_resched()) 504
504 schedule(); 505 cond_resched();
505#endif 506
506 } while (credit && state == BP_DONE); 507 } while (credit && state == BP_DONE);
507 508
508 /* Schedule more work if there is some still to be done. */ 509 /* Schedule more work if there is some still to be done. */
509 if (state == BP_EAGAIN) 510 if (state == BP_EAGAIN)
510 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); 511 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
511
512 mutex_unlock(&balloon_mutex);
513} 512}
514 513
515/* Resets the Xen limit, sets new target, and kicks off processing. */ 514/* Resets the Xen limit, sets new target, and kicks off processing. */
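The balloon_process() change narrows balloon_mutex to a single loop iteration and replaces the CONFIG_PREEMPT-guarded schedule() with an unconditional cond_resched(), so other users of the mutex are not starved while the worker loops. A schematic version of the new loop shape, with placeholder work and invented names:

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_mutex);

/* Sketch: take the lock per iteration and yield between iterations. */
static void example_worker_loop(void (*do_work)(void), bool (*more_work)(void))
{
	bool again;

	do {
		mutex_lock(&example_mutex);
		do_work();
		again = more_work();
		mutex_unlock(&example_mutex);

		cond_resched();		/* be fair to other runnable tasks */
	} while (again);
}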
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
568 568
569 pr_debug("priv %p\n", priv); 569 pr_debug("priv %p\n", priv);
570 570
571 mutex_lock(&priv->lock);
571 while (!list_empty(&priv->maps)) { 572 while (!list_empty(&priv->maps)) {
572 map = list_entry(priv->maps.next, struct grant_map, next); 573 map = list_entry(priv->maps.next, struct grant_map, next);
573 list_del(&map->next); 574 list_del(&map->next);
574 gntdev_put_map(NULL /* already removed */, map); 575 gntdev_put_map(NULL /* already removed */, map);
575 } 576 }
576 WARN_ON(!list_empty(&priv->freeable_maps)); 577 WARN_ON(!list_empty(&priv->freeable_maps));
578 mutex_unlock(&priv->lock);
577 579
578 if (use_ptemod) 580 if (use_ptemod)
579 mmu_notifier_unregister(&priv->mn, priv->mm); 581 mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
814 814
815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, 815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
816 addrs); 816 addrs);
817 if (!rv) 817 if (!rv) {
818 vunmap(vaddr); 818 vunmap(vaddr);
819 free_xenballooned_pages(node->nr_handles, node->hvm.pages);
820 }
819 else 821 else
820 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, 822 WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
821 node->nr_handles); 823 node->nr_handles);
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 510040b04c96..b1dc51888048 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
540 unlock_new_inode(inode); 540 unlock_new_inode(inode);
541 return inode; 541 return inode;
542error: 542error:
543 unlock_new_inode(inode); 543 iget_failed(inode);
544 iput(inode);
545 return ERR_PTR(retval); 544 return ERR_PTR(retval);
546 545
547} 546}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 09e4433717b8..e8aa57dc8d6d 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
149 unlock_new_inode(inode); 149 unlock_new_inode(inode);
150 return inode; 150 return inode;
151error: 151error:
152 unlock_new_inode(inode); 152 iget_failed(inode);
153 iput(inode);
154 return ERR_PTR(retval); 153 return ERR_PTR(retval);
155 154
156} 155}
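Both 9p error paths collapse onto iget_failed(), which marks a half-constructed inode bad, unlocks it and drops it in one call, replacing the unlock_new_inode() plus iput() pair. A hedged sketch of the usual iget_locked() flow this helper belongs to; the fill step is a placeholder:

#include <linux/err.h>
#include <linux/fs.h>

static int example_fill_inode(struct inode *inode)
{
	/* placeholder for stat-to-inode style initialisation */
	return 0;
}

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already cached, nothing to init */

	err = example_fill_inode(inode);
	if (err) {
		iget_failed(inode);	/* marks bad, unlocks and releases */
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}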
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 0ef5cc13fae2..81220b2203c6 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,6 +44,8 @@
44#define BTRFS_INODE_IN_DELALLOC_LIST 9 44#define BTRFS_INODE_IN_DELALLOC_LIST 9
45#define BTRFS_INODE_READDIO_NEED_LOCK 10 45#define BTRFS_INODE_READDIO_NEED_LOCK 10
46#define BTRFS_INODE_HAS_PROPS 11 46#define BTRFS_INODE_HAS_PROPS 11
47/* DIO is ready to submit */
48#define BTRFS_INODE_DIO_READY 12
47/* 49/*
48 * The following 3 bits are meant only for the btree inode. 50 * The following 3 bits are meant only for the btree inode.
49 * When any of them is set, it means an error happened while writing an 51 * When any of them is set, it means an error happened while writing an
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 80a9aefb0c46..aac314e14188 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1778,6 +1778,7 @@ struct btrfs_fs_info {
1778 spinlock_t unused_bgs_lock; 1778 spinlock_t unused_bgs_lock;
1779 struct list_head unused_bgs; 1779 struct list_head unused_bgs;
1780 struct mutex unused_bg_unpin_mutex; 1780 struct mutex unused_bg_unpin_mutex;
1781 struct mutex delete_unused_bgs_mutex;
1781 1782
1782 /* For btrfs to record security options */ 1783 /* For btrfs to record security options */
1783 struct security_mnt_opts security_opts; 1784 struct security_mnt_opts security_opts;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 862fbc206755..564a7de17d99 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
378 378
379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device); 379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
380 if (ret) 380 if (ret)
381 btrfs_error(root->fs_info, ret, "kobj add dev failed"); 381 btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
382 382
383 printk_in_rcu(KERN_INFO 383 printk_in_rcu(KERN_INFO
384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n", 384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3f43bfea3684..f556c3732c2c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1751,6 +1751,7 @@ static int cleaner_kthread(void *arg)
1751{ 1751{
1752 struct btrfs_root *root = arg; 1752 struct btrfs_root *root = arg;
1753 int again; 1753 int again;
1754 struct btrfs_trans_handle *trans;
1754 1755
1755 do { 1756 do {
1756 again = 0; 1757 again = 0;
@@ -1772,7 +1773,6 @@ static int cleaner_kthread(void *arg)
1772 } 1773 }
1773 1774
1774 btrfs_run_delayed_iputs(root); 1775 btrfs_run_delayed_iputs(root);
1775 btrfs_delete_unused_bgs(root->fs_info);
1776 again = btrfs_clean_one_deleted_snapshot(root); 1776 again = btrfs_clean_one_deleted_snapshot(root);
1777 mutex_unlock(&root->fs_info->cleaner_mutex); 1777 mutex_unlock(&root->fs_info->cleaner_mutex);
1778 1778
@@ -1781,6 +1781,16 @@ static int cleaner_kthread(void *arg)
1781 * needn't do anything special here. 1781 * needn't do anything special here.
1782 */ 1782 */
1783 btrfs_run_defrag_inodes(root->fs_info); 1783 btrfs_run_defrag_inodes(root->fs_info);
1784
1785 /*
1786 * Acquires fs_info->delete_unused_bgs_mutex to avoid racing
1787 * with relocation (btrfs_relocate_chunk) and relocation
1788 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
1789 * after acquiring fs_info->delete_unused_bgs_mutex. So we
1790 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
1791 * unused block groups.
1792 */
1793 btrfs_delete_unused_bgs(root->fs_info);
1784sleep: 1794sleep:
1785 if (!try_to_freeze() && !again) { 1795 if (!try_to_freeze() && !again) {
1786 set_current_state(TASK_INTERRUPTIBLE); 1796 set_current_state(TASK_INTERRUPTIBLE);
@@ -1789,6 +1799,34 @@ sleep:
1789 __set_current_state(TASK_RUNNING); 1799 __set_current_state(TASK_RUNNING);
1790 } 1800 }
1791 } while (!kthread_should_stop()); 1801 } while (!kthread_should_stop());
1802
1803 /*
1804 * Transaction kthread is stopped before us and wakes us up.
1805 * However we might have started a new transaction and COWed some
1806 * tree blocks when deleting unused block groups for example. So
1807 * make sure we commit the transaction we started to have a clean
1808 * shutdown when evicting the btree inode - if it has dirty pages
1809 * when we do the final iput() on it, eviction will trigger a
1810 * writeback for it which will fail with null pointer dereferences
1811 * since work queues and other resources were already released and
1812 * destroyed by the time the iput/eviction/writeback is made.
1813 */
1814 trans = btrfs_attach_transaction(root);
1815 if (IS_ERR(trans)) {
1816 if (PTR_ERR(trans) != -ENOENT)
1817 btrfs_err(root->fs_info,
1818 "cleaner transaction attach returned %ld",
1819 PTR_ERR(trans));
1820 } else {
1821 int ret;
1822
1823 ret = btrfs_commit_transaction(trans, root);
1824 if (ret)
1825 btrfs_err(root->fs_info,
1826 "cleaner open transaction commit returned %d",
1827 ret);
1828 }
1829
1792 return 0; 1830 return 0;
1793} 1831}
1794 1832
@@ -2492,6 +2530,7 @@ int open_ctree(struct super_block *sb,
2492 spin_lock_init(&fs_info->unused_bgs_lock); 2530 spin_lock_init(&fs_info->unused_bgs_lock);
2493 rwlock_init(&fs_info->tree_mod_log_lock); 2531 rwlock_init(&fs_info->tree_mod_log_lock);
2494 mutex_init(&fs_info->unused_bg_unpin_mutex); 2532 mutex_init(&fs_info->unused_bg_unpin_mutex);
2533 mutex_init(&fs_info->delete_unused_bgs_mutex);
2495 mutex_init(&fs_info->reloc_mutex); 2534 mutex_init(&fs_info->reloc_mutex);
2496 mutex_init(&fs_info->delalloc_root_mutex); 2535 mutex_init(&fs_info->delalloc_root_mutex);
2497 seqlock_init(&fs_info->profiles_lock); 2536 seqlock_init(&fs_info->profiles_lock);
@@ -2803,6 +2842,7 @@ int open_ctree(struct super_block *sb,
2803 !extent_buffer_uptodate(chunk_root->node)) { 2842 !extent_buffer_uptodate(chunk_root->node)) {
2804 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", 2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
2805 sb->s_id); 2844 sb->s_id);
2845 chunk_root->node = NULL;
2806 goto fail_tree_roots; 2846 goto fail_tree_roots;
2807 } 2847 }
2808 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 2848 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2840,7 +2880,7 @@ retry_root_backup:
2840 !extent_buffer_uptodate(tree_root->node)) { 2880 !extent_buffer_uptodate(tree_root->node)) {
2841 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", 2881 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2842 sb->s_id); 2882 sb->s_id);
2843 2883 tree_root->node = NULL;
2844 goto recovery_tree_root; 2884 goto recovery_tree_root;
2845 } 2885 }
2846 2886
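The comment added to cleaner_kthread() above is a lock-ordering argument: relocation takes delete_unused_bgs_mutex and then cleaner_mutex, so the cleaner must never hold cleaner_mutex while acquiring delete_unused_bgs_mutex, or an ABBA deadlock becomes possible. A small pthread sketch of the two orderings follows; only the mutex names mirror the diff, everything else is illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t delete_unused_bgs_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cleaner_mutex = PTHREAD_MUTEX_INITIALIZER;

/* relocation-like path: nests the locks as outer -> inner */
static void *relocate(void *arg)
{
	pthread_mutex_lock(&delete_unused_bgs_mutex);
	pthread_mutex_lock(&cleaner_mutex);
	puts("relocate: holds both locks");
	pthread_mutex_unlock(&cleaner_mutex);
	pthread_mutex_unlock(&delete_unused_bgs_mutex);
	return arg;
}

/* cleaner-like path: drops cleaner_mutex before taking the other lock,
 * so it never nests them in the opposite order */
static void *cleaner(void *arg)
{
	pthread_mutex_lock(&cleaner_mutex);
	puts("cleaner: periodic work under cleaner_mutex");
	pthread_mutex_unlock(&cleaner_mutex);

	pthread_mutex_lock(&delete_unused_bgs_mutex);
	puts("cleaner: deleting unused block groups");
	pthread_mutex_unlock(&delete_unused_bgs_mutex);
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, relocate, NULL);
	pthread_create(&b, NULL, cleaner, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}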
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 38b76cc02f48..07204bf601ed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2296,9 +2296,22 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2296static inline struct btrfs_delayed_ref_node * 2296static inline struct btrfs_delayed_ref_node *
2297select_delayed_ref(struct btrfs_delayed_ref_head *head) 2297select_delayed_ref(struct btrfs_delayed_ref_head *head)
2298{ 2298{
2299 struct btrfs_delayed_ref_node *ref;
2300
2299 if (list_empty(&head->ref_list)) 2301 if (list_empty(&head->ref_list))
2300 return NULL; 2302 return NULL;
2301 2303
2304 /*
2305 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2306 * This is to prevent a ref count from going down to zero, which deletes
2307 * the extent item from the extent tree, when there still are references
2308 * to add, which would fail because they would not find the extent item.
2309 */
2310 list_for_each_entry(ref, &head->ref_list, list) {
2311 if (ref->action == BTRFS_ADD_DELAYED_REF)
2312 return ref;
2313 }
2314
2302 return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node, 2315 return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2303 list); 2316 list);
2304} 2317}
@@ -4214,6 +4227,24 @@ out:
4214 space_info->chunk_alloc = 0; 4227 space_info->chunk_alloc = 0;
4215 spin_unlock(&space_info->lock); 4228 spin_unlock(&space_info->lock);
4216 mutex_unlock(&fs_info->chunk_mutex); 4229 mutex_unlock(&fs_info->chunk_mutex);
4230 /*
4231 * When we allocate a new chunk we reserve space in the chunk block
4232 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4233 * add new nodes/leafs to it if we end up needing to do it when
4234 * inserting the chunk item and updating device items as part of the
4235 * second phase of chunk allocation, performed by
4236 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4237 * large number of new block groups to create in our transaction
4238 * handle's new_bgs list to avoid exhausting the chunk block reserve
4239 * in extreme cases - like having a single transaction create many new
4240 * block groups when starting to write out the free space caches of all
4241 * the block groups that were made dirty during the lifetime of the
4242 * transaction.
4243 */
4244 if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4245 btrfs_create_pending_block_groups(trans, trans->root);
4246 btrfs_trans_release_chunk_metadata(trans);
4247 }
4217 return ret; 4248 return ret;
4218} 4249}
4219 4250
@@ -9889,6 +9920,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9889 } 9920 }
9890 spin_unlock(&fs_info->unused_bgs_lock); 9921 spin_unlock(&fs_info->unused_bgs_lock);
9891 9922
9923 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
9924
9892 /* Don't want to race with allocators so take the groups_sem */ 9925 /* Don't want to race with allocators so take the groups_sem */
9893 down_write(&space_info->groups_sem); 9926 down_write(&space_info->groups_sem);
9894 spin_lock(&block_group->lock); 9927 spin_lock(&block_group->lock);
@@ -9983,6 +10016,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9983end_trans: 10016end_trans:
9984 btrfs_end_transaction(trans, root); 10017 btrfs_end_transaction(trans, root);
9985next: 10018next:
10019 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
9986 btrfs_put_block_group(block_group); 10020 btrfs_put_block_group(block_group);
9987 spin_lock(&fs_info->unused_bgs_lock); 10021 spin_lock(&fs_info->unused_bgs_lock);
9988 } 10022 }
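select_delayed_ref() above now scans the head's ref list for an ADD entry before falling back to the first element, so an extent's reference count cannot drop to zero while additions are still queued behind a drop. A standalone sketch of that selection policy (a plain singly linked list stands in for the kernel list, names are illustrative):

#include <stdio.h>
#include <stddef.h>

enum ref_action { ADD_REF, DROP_REF };

struct ref {
	enum ref_action action;
	const char *name;
	struct ref *next;
};

/* prefer ADD_REF entries; otherwise fall back to the list head */
static struct ref *select_ref(struct ref *head)
{
	struct ref *r;

	if (!head)
		return NULL;
	for (r = head; r; r = r->next)
		if (r->action == ADD_REF)
			return r;
	return head;
}

int main(void)
{
	struct ref add  = { ADD_REF,  "add",  NULL };
	struct ref drop = { DROP_REF, "drop", &add };	/* drop queued first */

	printf("selected: %s\n", select_ref(&drop)->name);	/* prints "add" */
	return 0;
}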
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index f6a596d5a637..d4a582ac3f73 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
246{ 246{
247 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 247 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
248 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; 248 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
249 spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
249 struct btrfs_free_space *info; 250 struct btrfs_free_space *info;
250 struct rb_node *n; 251 struct rb_node *n;
251 u64 count; 252 u64 count;
@@ -254,24 +255,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
254 return; 255 return;
255 256
256 while (1) { 257 while (1) {
258 bool add_to_ctl = true;
259
260 spin_lock(rbroot_lock);
257 n = rb_first(rbroot); 261 n = rb_first(rbroot);
258 if (!n) 262 if (!n) {
263 spin_unlock(rbroot_lock);
259 break; 264 break;
265 }
260 266
261 info = rb_entry(n, struct btrfs_free_space, offset_index); 267 info = rb_entry(n, struct btrfs_free_space, offset_index);
262 BUG_ON(info->bitmap); /* Logic error */ 268 BUG_ON(info->bitmap); /* Logic error */
263 269
264 if (info->offset > root->ino_cache_progress) 270 if (info->offset > root->ino_cache_progress)
265 goto free; 271 add_to_ctl = false;
266 else if (info->offset + info->bytes > root->ino_cache_progress) 272 else if (info->offset + info->bytes > root->ino_cache_progress)
267 count = root->ino_cache_progress - info->offset + 1; 273 count = root->ino_cache_progress - info->offset + 1;
268 else 274 else
269 count = info->bytes; 275 count = info->bytes;
270 276
271 __btrfs_add_free_space(ctl, info->offset, count);
272free:
273 rb_erase(&info->offset_index, rbroot); 277 rb_erase(&info->offset_index, rbroot);
274 kfree(info); 278 spin_unlock(rbroot_lock);
279 if (add_to_ctl)
280 __btrfs_add_free_space(ctl, info->offset, count);
281 kmem_cache_free(btrfs_free_space_cachep, info);
275 } 282 }
276} 283}
277 284
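The inode-map.c hunk above makes btrfs_unpin_free_ino() hold the pinned tree's lock only around rb_first()/rb_erase() and defers the free-space insertion and the free until after unlocking. A userspace sketch of that "detach under the lock, process outside it" loop, assuming a pthread mutex in place of the spinlock and a linked list in place of the rbtree:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long offset;
	struct entry *next;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pinned;		/* protected by tree_lock */

static void expensive_add_free_space(unsigned long offset)
{
	printf("adding free ino at %lu\n", offset);	/* stands in for real work */
}

static void unpin_all(void)
{
	for (;;) {
		struct entry *e;

		pthread_mutex_lock(&tree_lock);
		e = pinned;			/* "rb_first()" */
		if (!e) {
			pthread_mutex_unlock(&tree_lock);
			break;
		}
		pinned = e->next;		/* "rb_erase()" */
		pthread_mutex_unlock(&tree_lock);

		/* the heavier work runs without holding the lock */
		expensive_add_free_space(e->offset);
		free(e);
	}
}

int main(void)
{
	for (unsigned long i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->offset = 256 + i;
		e->next = pinned;
		pinned = e;
	}
	unpin_all();
	return 0;
}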
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 855935f6671a..e33dff356460 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4209,7 +4209,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4209 u64 extent_num_bytes = 0; 4209 u64 extent_num_bytes = 0;
4210 u64 extent_offset = 0; 4210 u64 extent_offset = 0;
4211 u64 item_end = 0; 4211 u64 item_end = 0;
4212 u64 last_size = (u64)-1; 4212 u64 last_size = new_size;
4213 u32 found_type = (u8)-1; 4213 u32 found_type = (u8)-1;
4214 int found_extent; 4214 int found_extent;
4215 int del_item; 4215 int del_item;
@@ -4493,8 +4493,7 @@ out:
4493 btrfs_abort_transaction(trans, root, ret); 4493 btrfs_abort_transaction(trans, root, ret);
4494 } 4494 }
4495error: 4495error:
4496 if (last_size != (u64)-1 && 4496 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4497 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4498 btrfs_ordered_update_i_size(inode, last_size, NULL); 4497 btrfs_ordered_update_i_size(inode, last_size, NULL);
4499 4498
4500 btrfs_free_path(path); 4499 btrfs_free_path(path);
@@ -4989,8 +4988,9 @@ static void evict_inode_truncate_pages(struct inode *inode)
4989 /* 4988 /*
4990 * Keep looping until we have no more ranges in the io tree. 4989 * Keep looping until we have no more ranges in the io tree.
4991 * We can have ongoing bios started by readpages (called from readahead) 4990 * We can have ongoing bios started by readpages (called from readahead)
4992 * that didn't get their end io callbacks called yet or they are still 4991 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
4993 * in progress ((extent_io.c:end_bio_extent_readpage()). This means some 4992 * still in progress (unlocked the pages in the bio but did not yet
 4993 * unlock the ranges in the io tree). Therefore this means some
4994 * ranges can still be locked and eviction started because before 4994 * ranges can still be locked and eviction started because before
4995 * submitting those bios, which are executed by a separate task (work 4995 * submitting those bios, which are executed by a separate task (work
4996 * queue kthread), inode references (inode->i_count) were not taken 4996 * queue kthread), inode references (inode->i_count) were not taken
@@ -7546,6 +7546,7 @@ unlock:
7546 7546
7547 current->journal_info = outstanding_extents; 7547 current->journal_info = outstanding_extents;
7548 btrfs_free_reserved_data_space(inode, len); 7548 btrfs_free_reserved_data_space(inode, len);
7549 set_bit(BTRFS_INODE_DIO_READY, &BTRFS_I(inode)->runtime_flags);
7549 } 7550 }
7550 7551
7551 /* 7552 /*
@@ -7871,8 +7872,6 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
7871 struct bio *dio_bio; 7872 struct bio *dio_bio;
7872 int ret; 7873 int ret;
7873 7874
7874 if (err)
7875 goto out_done;
7876again: 7875again:
7877 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 7876 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7878 &ordered_offset, 7877 &ordered_offset,
@@ -7895,7 +7894,6 @@ out_test:
7895 ordered = NULL; 7894 ordered = NULL;
7896 goto again; 7895 goto again;
7897 } 7896 }
7898out_done:
7899 dio_bio = dip->dio_bio; 7897 dio_bio = dip->dio_bio;
7900 7898
7901 kfree(dip); 7899 kfree(dip);
@@ -8163,9 +8161,8 @@ out_err:
8163static void btrfs_submit_direct(int rw, struct bio *dio_bio, 8161static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8164 struct inode *inode, loff_t file_offset) 8162 struct inode *inode, loff_t file_offset)
8165{ 8163{
8166 struct btrfs_root *root = BTRFS_I(inode)->root; 8164 struct btrfs_dio_private *dip = NULL;
8167 struct btrfs_dio_private *dip; 8165 struct bio *io_bio = NULL;
8168 struct bio *io_bio;
8169 struct btrfs_io_bio *btrfs_bio; 8166 struct btrfs_io_bio *btrfs_bio;
8170 int skip_sum; 8167 int skip_sum;
8171 int write = rw & REQ_WRITE; 8168 int write = rw & REQ_WRITE;
@@ -8182,7 +8179,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8182 dip = kzalloc(sizeof(*dip), GFP_NOFS); 8179 dip = kzalloc(sizeof(*dip), GFP_NOFS);
8183 if (!dip) { 8180 if (!dip) {
8184 ret = -ENOMEM; 8181 ret = -ENOMEM;
8185 goto free_io_bio; 8182 goto free_ordered;
8186 } 8183 }
8187 8184
8188 dip->private = dio_bio->bi_private; 8185 dip->private = dio_bio->bi_private;
@@ -8210,25 +8207,55 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8210 8207
8211 if (btrfs_bio->end_io) 8208 if (btrfs_bio->end_io)
8212 btrfs_bio->end_io(btrfs_bio, ret); 8209 btrfs_bio->end_io(btrfs_bio, ret);
8213free_io_bio:
8214 bio_put(io_bio);
8215 8210
8216free_ordered: 8211free_ordered:
8217 /* 8212 /*
8218 * If this is a write, we need to clean up the reserved space and kill 8213 * If we arrived here it means either we failed to submit the dip
 8219 * the ordered extent. 8214 * or we failed to clone the dio_bio or failed to allocate the
8215 * dip. If we cloned the dio_bio and allocated the dip, we can just
8216 * call bio_endio against our io_bio so that we get proper resource
8217 * cleanup if we fail to submit the dip, otherwise, we must do the
8218 * same as btrfs_endio_direct_[write|read] because we can't call these
8219 * callbacks - they require an allocated dip and a clone of dio_bio.
8220 */ 8220 */
8221 if (write) { 8221 if (io_bio && dip) {
8222 struct btrfs_ordered_extent *ordered; 8222 bio_endio(io_bio, ret);
8223 ordered = btrfs_lookup_ordered_extent(inode, file_offset); 8223 /*
8224 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 8224 * The end io callbacks free our dip, do the final put on io_bio
8225 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 8225 * and all the cleanup and final put for dio_bio (through
8226 btrfs_free_reserved_extent(root, ordered->start, 8226 * dio_end_io()).
8227 ordered->disk_len, 1); 8227 */
8228 btrfs_put_ordered_extent(ordered); 8228 dip = NULL;
8229 btrfs_put_ordered_extent(ordered); 8229 io_bio = NULL;
8230 } else {
8231 if (write) {
8232 struct btrfs_ordered_extent *ordered;
8233
8234 ordered = btrfs_lookup_ordered_extent(inode,
8235 file_offset);
8236 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
8237 /*
8238 * Decrements our ref on the ordered extent and removes
8239 * the ordered extent from the inode's ordered tree,
8240 * doing all the proper resource cleanup such as for the
8241 * reserved space and waking up any waiters for this
8242 * ordered extent (through btrfs_remove_ordered_extent).
8243 */
8244 btrfs_finish_ordered_io(ordered);
8245 } else {
8246 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8247 file_offset + dio_bio->bi_iter.bi_size - 1);
8248 }
8249 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
8250 /*
8251 * Releases and cleans up our dio_bio, no need to bio_put()
8252 * nor bio_endio()/bio_io_error() against dio_bio.
8253 */
8254 dio_end_io(dio_bio, ret);
8230 } 8255 }
8231 bio_endio(dio_bio, ret); 8256 if (io_bio)
8257 bio_put(io_bio);
8258 kfree(dip);
8232} 8259}
8233 8260
8234static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb, 8261static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
@@ -8330,9 +8357,18 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8330 btrfs_submit_direct, flags); 8357 btrfs_submit_direct, flags);
8331 if (iov_iter_rw(iter) == WRITE) { 8358 if (iov_iter_rw(iter) == WRITE) {
8332 current->journal_info = NULL; 8359 current->journal_info = NULL;
8333 if (ret < 0 && ret != -EIOCBQUEUED) 8360 if (ret < 0 && ret != -EIOCBQUEUED) {
8334 btrfs_delalloc_release_space(inode, count); 8361 /*
8335 else if (ret >= 0 && (size_t)ret < count) 8362 * If the error comes from submitting stage,
 8363 * btrfs_get_blocks_direct() has freed data space,
8364 * and metadata space will be handled by
8365 * finish_ordered_fn, don't do that again to make
8366 * sure bytes_may_use is correct.
8367 */
8368 if (!test_and_clear_bit(BTRFS_INODE_DIO_READY,
8369 &BTRFS_I(inode)->runtime_flags))
8370 btrfs_delalloc_release_space(inode, count);
8371 } else if (ret >= 0 && (size_t)ret < count)
8336 btrfs_delalloc_release_space(inode, 8372 btrfs_delalloc_release_space(inode,
8337 count - (size_t)ret); 8373 count - (size_t)ret);
8338 } 8374 }
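The BTRFS_INODE_DIO_READY bit introduced in the inode.c hunks is a hand-off: if the submit stage already returned the reserved data space it sets the bit, and the error path in btrfs_direct_IO() only releases the reservation when the bit was not set, avoiding a double release. A small sketch of that test-and-clear hand-off using C11 atomics; the names and the reservation itself are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool dio_ready;		/* "submit path already released the space" */
static long bytes_reserved = 4096;

static void release_reserved_space(const char *who)
{
	printf("%s releases %ld bytes\n", who, bytes_reserved);
	bytes_reserved = 0;
}

/* submit stage: reservation handed over, mark it done */
static void submit_stage(void)
{
	release_reserved_space("submit stage");
	atomic_store(&dio_ready, true);
}

/* error path: release only if nobody did it yet (test-and-clear) */
static void error_path(void)
{
	if (!atomic_exchange(&dio_ready, false))
		release_reserved_space("error path");
	else
		puts("error path: reservation already handled, skipping");
}

int main(void)
{
	submit_stage();
	error_path();		/* takes the "skipping" branch, no double release */
	return 0;
}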
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c86b835da7a8..0770c91586ca 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -87,7 +87,8 @@ struct btrfs_ioctl_received_subvol_args_32 {
87 87
88 88
89static int btrfs_clone(struct inode *src, struct inode *inode, 89static int btrfs_clone(struct inode *src, struct inode *inode,
90 u64 off, u64 olen, u64 olen_aligned, u64 destoff); 90 u64 off, u64 olen, u64 olen_aligned, u64 destoff,
91 int no_time_update);
91 92
92/* Mask out flags that are inappropriate for the given type of inode. */ 93/* Mask out flags that are inappropriate for the given type of inode. */
93static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) 94static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -2765,14 +2766,11 @@ out:
2765 return ret; 2766 return ret;
2766} 2767}
2767 2768
2768static struct page *extent_same_get_page(struct inode *inode, u64 off) 2769static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
2769{ 2770{
2770 struct page *page; 2771 struct page *page;
2771 pgoff_t index;
2772 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; 2772 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
2773 2773
2774 index = off >> PAGE_CACHE_SHIFT;
2775
2776 page = grab_cache_page(inode->i_mapping, index); 2774 page = grab_cache_page(inode->i_mapping, index);
2777 if (!page) 2775 if (!page)
2778 return NULL; 2776 return NULL;
@@ -2793,6 +2791,20 @@ static struct page *extent_same_get_page(struct inode *inode, u64 off)
2793 return page; 2791 return page;
2794} 2792}
2795 2793
2794static int gather_extent_pages(struct inode *inode, struct page **pages,
2795 int num_pages, u64 off)
2796{
2797 int i;
2798 pgoff_t index = off >> PAGE_CACHE_SHIFT;
2799
2800 for (i = 0; i < num_pages; i++) {
2801 pages[i] = extent_same_get_page(inode, index + i);
2802 if (!pages[i])
2803 return -ENOMEM;
2804 }
2805 return 0;
2806}
2807
2796static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) 2808static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2797{ 2809{
2798 /* do any pending delalloc/csum calc on src, one way or 2810 /* do any pending delalloc/csum calc on src, one way or
@@ -2818,52 +2830,120 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
2818 } 2830 }
2819} 2831}
2820 2832
2821static void btrfs_double_unlock(struct inode *inode1, u64 loff1, 2833static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
2822 struct inode *inode2, u64 loff2, u64 len)
2823{ 2834{
2824 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2825 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2826
2827 mutex_unlock(&inode1->i_mutex); 2835 mutex_unlock(&inode1->i_mutex);
2828 mutex_unlock(&inode2->i_mutex); 2836 mutex_unlock(&inode2->i_mutex);
2829} 2837}
2830 2838
2831static void btrfs_double_lock(struct inode *inode1, u64 loff1, 2839static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
2832 struct inode *inode2, u64 loff2, u64 len) 2840{
2841 if (inode1 < inode2)
2842 swap(inode1, inode2);
2843
2844 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
2845 if (inode1 != inode2)
2846 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
2847}
2848
2849static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
2850 struct inode *inode2, u64 loff2, u64 len)
2851{
2852 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
2853 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
2854}
2855
2856static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
2857 struct inode *inode2, u64 loff2, u64 len)
2833{ 2858{
2834 if (inode1 < inode2) { 2859 if (inode1 < inode2) {
2835 swap(inode1, inode2); 2860 swap(inode1, inode2);
2836 swap(loff1, loff2); 2861 swap(loff1, loff2);
2837 } 2862 }
2838
2839 mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
2840 lock_extent_range(inode1, loff1, len); 2863 lock_extent_range(inode1, loff1, len);
2841 if (inode1 != inode2) { 2864 if (inode1 != inode2)
2842 mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
2843 lock_extent_range(inode2, loff2, len); 2865 lock_extent_range(inode2, loff2, len);
2866}
2867
2868struct cmp_pages {
2869 int num_pages;
2870 struct page **src_pages;
2871 struct page **dst_pages;
2872};
2873
2874static void btrfs_cmp_data_free(struct cmp_pages *cmp)
2875{
2876 int i;
2877 struct page *pg;
2878
2879 for (i = 0; i < cmp->num_pages; i++) {
2880 pg = cmp->src_pages[i];
2881 if (pg)
2882 page_cache_release(pg);
2883 pg = cmp->dst_pages[i];
2884 if (pg)
2885 page_cache_release(pg);
2886 }
2887 kfree(cmp->src_pages);
2888 kfree(cmp->dst_pages);
2889}
2890
2891static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
2892 struct inode *dst, u64 dst_loff,
2893 u64 len, struct cmp_pages *cmp)
2894{
2895 int ret;
2896 int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
2897 struct page **src_pgarr, **dst_pgarr;
2898
2899 /*
2900 * We must gather up all the pages before we initiate our
2901 * extent locking. We use an array for the page pointers. Size
2902 * of the array is bounded by len, which is in turn bounded by
2903 * BTRFS_MAX_DEDUPE_LEN.
2904 */
2905 src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
2906 dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
2907 if (!src_pgarr || !dst_pgarr) {
2908 kfree(src_pgarr);
2909 kfree(dst_pgarr);
2910 return -ENOMEM;
2844 } 2911 }
2912 cmp->num_pages = num_pages;
2913 cmp->src_pages = src_pgarr;
2914 cmp->dst_pages = dst_pgarr;
2915
2916 ret = gather_extent_pages(src, cmp->src_pages, cmp->num_pages, loff);
2917 if (ret)
2918 goto out;
2919
2920 ret = gather_extent_pages(dst, cmp->dst_pages, cmp->num_pages, dst_loff);
2921
2922out:
2923 if (ret)
2924 btrfs_cmp_data_free(cmp);
2925 return 0;
2845} 2926}
2846 2927
2847static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, 2928static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2848 u64 dst_loff, u64 len) 2929 u64 dst_loff, u64 len, struct cmp_pages *cmp)
2849{ 2930{
2850 int ret = 0; 2931 int ret = 0;
2932 int i;
2851 struct page *src_page, *dst_page; 2933 struct page *src_page, *dst_page;
2852 unsigned int cmp_len = PAGE_CACHE_SIZE; 2934 unsigned int cmp_len = PAGE_CACHE_SIZE;
2853 void *addr, *dst_addr; 2935 void *addr, *dst_addr;
2854 2936
2937 i = 0;
2855 while (len) { 2938 while (len) {
2856 if (len < PAGE_CACHE_SIZE) 2939 if (len < PAGE_CACHE_SIZE)
2857 cmp_len = len; 2940 cmp_len = len;
2858 2941
2859 src_page = extent_same_get_page(src, loff); 2942 BUG_ON(i >= cmp->num_pages);
2860 if (!src_page) 2943
2861 return -EINVAL; 2944 src_page = cmp->src_pages[i];
2862 dst_page = extent_same_get_page(dst, dst_loff); 2945 dst_page = cmp->dst_pages[i];
2863 if (!dst_page) { 2946
2864 page_cache_release(src_page);
2865 return -EINVAL;
2866 }
2867 addr = kmap_atomic(src_page); 2947 addr = kmap_atomic(src_page);
2868 dst_addr = kmap_atomic(dst_page); 2948 dst_addr = kmap_atomic(dst_page);
2869 2949
@@ -2875,15 +2955,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
2875 2955
2876 kunmap_atomic(addr); 2956 kunmap_atomic(addr);
2877 kunmap_atomic(dst_addr); 2957 kunmap_atomic(dst_addr);
2878 page_cache_release(src_page);
2879 page_cache_release(dst_page);
2880 2958
2881 if (ret) 2959 if (ret)
2882 break; 2960 break;
2883 2961
2884 loff += cmp_len;
2885 dst_loff += cmp_len;
2886 len -= cmp_len; 2962 len -= cmp_len;
2963 i++;
2887 } 2964 }
2888 2965
2889 return ret; 2966 return ret;
@@ -2914,27 +2991,62 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2914{ 2991{
2915 int ret; 2992 int ret;
2916 u64 len = olen; 2993 u64 len = olen;
2994 struct cmp_pages cmp;
2995 int same_inode = 0;
2996 u64 same_lock_start = 0;
2997 u64 same_lock_len = 0;
2917 2998
2918 /*
2919 * btrfs_clone() can't handle extents in the same file
2920 * yet. Once that works, we can drop this check and replace it
2921 * with a check for the same inode, but overlapping extents.
2922 */
2923 if (src == dst) 2999 if (src == dst)
2924 return -EINVAL; 3000 same_inode = 1;
2925 3001
2926 if (len == 0) 3002 if (len == 0)
2927 return 0; 3003 return 0;
2928 3004
2929 btrfs_double_lock(src, loff, dst, dst_loff, len); 3005 if (same_inode) {
3006 mutex_lock(&src->i_mutex);
2930 3007
2931 ret = extent_same_check_offsets(src, loff, &len, olen); 3008 ret = extent_same_check_offsets(src, loff, &len, olen);
2932 if (ret) 3009 if (ret)
2933 goto out_unlock; 3010 goto out_unlock;
2934 3011
2935 ret = extent_same_check_offsets(dst, dst_loff, &len, olen); 3012 /*
2936 if (ret) 3013 * Single inode case wants the same checks, except we
2937 goto out_unlock; 3014 * don't want our length pushed out past i_size as
3015 * comparing that data range makes no sense.
3016 *
3017 * extent_same_check_offsets() will do this for an
3018 * unaligned length at i_size, so catch it here and
3019 * reject the request.
3020 *
3021 * This effectively means we require aligned extents
3022 * for the single-inode case, whereas the other cases
3023 * allow an unaligned length so long as it ends at
3024 * i_size.
3025 */
3026 if (len != olen) {
3027 ret = -EINVAL;
3028 goto out_unlock;
3029 }
3030
3031 /* Check for overlapping ranges */
3032 if (dst_loff + len > loff && dst_loff < loff + len) {
3033 ret = -EINVAL;
3034 goto out_unlock;
3035 }
3036
3037 same_lock_start = min_t(u64, loff, dst_loff);
3038 same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3039 } else {
3040 btrfs_double_inode_lock(src, dst);
3041
3042 ret = extent_same_check_offsets(src, loff, &len, olen);
3043 if (ret)
3044 goto out_unlock;
3045
3046 ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
3047 if (ret)
3048 goto out_unlock;
3049 }
2938 3050
2939 /* don't make the dst file partly checksummed */ 3051 /* don't make the dst file partly checksummed */
2940 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != 3052 if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
@@ -2943,12 +3055,32 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
2943 goto out_unlock; 3055 goto out_unlock;
2944 } 3056 }
2945 3057
2946 ret = btrfs_cmp_data(src, loff, dst, dst_loff, len); 3058 ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
3059 if (ret)
3060 goto out_unlock;
3061
3062 if (same_inode)
3063 lock_extent_range(src, same_lock_start, same_lock_len);
3064 else
3065 btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
3066
3067 /* pass original length for comparison so we stay within i_size */
3068 ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
2947 if (ret == 0) 3069 if (ret == 0)
2948 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff); 3070 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3071
3072 if (same_inode)
3073 unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
3074 same_lock_start + same_lock_len - 1);
3075 else
3076 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
2949 3077
3078 btrfs_cmp_data_free(&cmp);
2950out_unlock: 3079out_unlock:
2951 btrfs_double_unlock(src, loff, dst, dst_loff, len); 3080 if (same_inode)
3081 mutex_unlock(&src->i_mutex);
3082 else
3083 btrfs_double_inode_unlock(src, dst);
2952 3084
2953 return ret; 3085 return ret;
2954} 3086}
@@ -2958,7 +3090,7 @@ out_unlock:
2958static long btrfs_ioctl_file_extent_same(struct file *file, 3090static long btrfs_ioctl_file_extent_same(struct file *file,
2959 struct btrfs_ioctl_same_args __user *argp) 3091 struct btrfs_ioctl_same_args __user *argp)
2960{ 3092{
2961 struct btrfs_ioctl_same_args *same; 3093 struct btrfs_ioctl_same_args *same = NULL;
2962 struct btrfs_ioctl_same_extent_info *info; 3094 struct btrfs_ioctl_same_extent_info *info;
2963 struct inode *src = file_inode(file); 3095 struct inode *src = file_inode(file);
2964 u64 off; 3096 u64 off;
@@ -2988,6 +3120,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
2988 3120
2989 if (IS_ERR(same)) { 3121 if (IS_ERR(same)) {
2990 ret = PTR_ERR(same); 3122 ret = PTR_ERR(same);
3123 same = NULL;
2991 goto out; 3124 goto out;
2992 } 3125 }
2993 3126
@@ -3058,6 +3191,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
3058 3191
3059out: 3192out:
3060 mnt_drop_write_file(file); 3193 mnt_drop_write_file(file);
3194 kfree(same);
3061 return ret; 3195 return ret;
3062} 3196}
3063 3197
@@ -3100,13 +3234,15 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
3100 struct inode *inode, 3234 struct inode *inode,
3101 u64 endoff, 3235 u64 endoff,
3102 const u64 destoff, 3236 const u64 destoff,
3103 const u64 olen) 3237 const u64 olen,
3238 int no_time_update)
3104{ 3239{
3105 struct btrfs_root *root = BTRFS_I(inode)->root; 3240 struct btrfs_root *root = BTRFS_I(inode)->root;
3106 int ret; 3241 int ret;
3107 3242
3108 inode_inc_iversion(inode); 3243 inode_inc_iversion(inode);
3109 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 3244 if (!no_time_update)
3245 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
3110 /* 3246 /*
3111 * We round up to the block size at eof when determining which 3247 * We round up to the block size at eof when determining which
3112 * extents to clone above, but shouldn't round up the file size. 3248 * extents to clone above, but shouldn't round up the file size.
@@ -3191,13 +3327,13 @@ static void clone_update_extent_map(struct inode *inode,
3191 * @inode: Inode to clone to 3327 * @inode: Inode to clone to
3192 * @off: Offset within source to start clone from 3328 * @off: Offset within source to start clone from
3193 * @olen: Original length, passed by user, of range to clone 3329 * @olen: Original length, passed by user, of range to clone
3194 * @olen_aligned: Block-aligned value of olen, extent_same uses 3330 * @olen_aligned: Block-aligned value of olen
3195 * identical values here
3196 * @destoff: Offset within @inode to start clone 3331 * @destoff: Offset within @inode to start clone
3332 * @no_time_update: Whether to update mtime/ctime on the target inode
3197 */ 3333 */
3198static int btrfs_clone(struct inode *src, struct inode *inode, 3334static int btrfs_clone(struct inode *src, struct inode *inode,
3199 const u64 off, const u64 olen, const u64 olen_aligned, 3335 const u64 off, const u64 olen, const u64 olen_aligned,
3200 const u64 destoff) 3336 const u64 destoff, int no_time_update)
3201{ 3337{
3202 struct btrfs_root *root = BTRFS_I(inode)->root; 3338 struct btrfs_root *root = BTRFS_I(inode)->root;
3203 struct btrfs_path *path = NULL; 3339 struct btrfs_path *path = NULL;
@@ -3452,6 +3588,20 @@ process_slot:
3452 u64 trim = 0; 3588 u64 trim = 0;
3453 u64 aligned_end = 0; 3589 u64 aligned_end = 0;
3454 3590
3591 /*
3592 * Don't copy an inline extent into an offset
3593 * greater than zero. Having an inline extent
3594 * at such an offset results in chaos as btrfs
3595 * isn't prepared for such cases. Just skip
3596 * this case for the same reasons as commented
3597 * at btrfs_ioctl_clone().
3598 */
3599 if (last_dest_end > 0) {
3600 ret = -EOPNOTSUPP;
3601 btrfs_end_transaction(trans, root);
3602 goto out;
3603 }
3604
3455 if (off > key.offset) { 3605 if (off > key.offset) {
3456 skip = off - key.offset; 3606 skip = off - key.offset;
3457 new_key.offset += skip; 3607 new_key.offset += skip;
@@ -3521,7 +3671,8 @@ process_slot:
3521 root->sectorsize); 3671 root->sectorsize);
3522 ret = clone_finish_inode_update(trans, inode, 3672 ret = clone_finish_inode_update(trans, inode,
3523 last_dest_end, 3673 last_dest_end,
3524 destoff, olen); 3674 destoff, olen,
3675 no_time_update);
3525 if (ret) 3676 if (ret)
3526 goto out; 3677 goto out;
3527 if (new_key.offset + datal >= destoff + len) 3678 if (new_key.offset + datal >= destoff + len)
@@ -3559,7 +3710,7 @@ process_slot:
3559 clone_update_extent_map(inode, trans, NULL, last_dest_end, 3710 clone_update_extent_map(inode, trans, NULL, last_dest_end,
3560 destoff + len - last_dest_end); 3711 destoff + len - last_dest_end);
3561 ret = clone_finish_inode_update(trans, inode, destoff + len, 3712 ret = clone_finish_inode_update(trans, inode, destoff + len,
3562 destoff, olen); 3713 destoff, olen, no_time_update);
3563 } 3714 }
3564 3715
3565out: 3716out:
@@ -3696,7 +3847,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
3696 lock_extent_range(inode, destoff, len); 3847 lock_extent_range(inode, destoff, len);
3697 } 3848 }
3698 3849
3699 ret = btrfs_clone(src, inode, off, olen, len, destoff); 3850 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3700 3851
3701 if (same_inode) { 3852 if (same_inode) {
3702 u64 lock_start = min_t(u64, off, destoff); 3853 u64 lock_start = min_t(u64, off, destoff);
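The ioctl.c rework above splits the old btrfs_double_lock() into inode locking and extent locking, with btrfs_double_inode_lock() ordering the two i_mutex acquisitions by inode address. The sketch below shows that classic two-lock ordering in userspace pthreads; the direction of the comparison is arbitrary as long as every caller uses the same one, and all names here are illustrative.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct inode_like {
	const char *name;
	pthread_mutex_t lock;
};

static struct inode_like src = { "src", PTHREAD_MUTEX_INITIALIZER };
static struct inode_like dst = { "dst", PTHREAD_MUTEX_INITIALIZER };

/* take both locks in one fixed global order (by address here); callers that
 * pass the pair in the opposite order then cannot deadlock each other */
static void double_lock(struct inode_like *a, struct inode_like *b)
{
	if ((uintptr_t)a > (uintptr_t)b) {
		struct inode_like *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)
		pthread_mutex_lock(&b->lock);
}

static void double_unlock(struct inode_like *a, struct inode_like *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

static void *worker(void *arg)
{
	struct inode_like **pair = arg;

	double_lock(pair[0], pair[1]);
	printf("locked %s and %s\n", pair[0]->name, pair[1]->name);
	double_unlock(pair[0], pair[1]);
	return NULL;
}

int main(void)
{
	struct inode_like *fwd[2] = { &src, &dst };
	struct inode_like *rev[2] = { &dst, &src };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, fwd);
	pthread_create(&t2, NULL, worker, rev);	/* opposite argument order, still safe */
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}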
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 89656d799ff6..52170cf1757e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -552,6 +552,10 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
552 trace_btrfs_ordered_extent_put(entry->inode, entry); 552 trace_btrfs_ordered_extent_put(entry->inode, entry);
553 553
554 if (atomic_dec_and_test(&entry->refs)) { 554 if (atomic_dec_and_test(&entry->refs)) {
555 ASSERT(list_empty(&entry->log_list));
556 ASSERT(list_empty(&entry->trans_list));
557 ASSERT(list_empty(&entry->root_extent_list));
558 ASSERT(RB_EMPTY_NODE(&entry->rb_node));
555 if (entry->inode) 559 if (entry->inode)
556 btrfs_add_delayed_iput(entry->inode); 560 btrfs_add_delayed_iput(entry->inode);
557 while (!list_empty(&entry->list)) { 561 while (!list_empty(&entry->list)) {
@@ -579,6 +583,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
579 spin_lock_irq(&tree->lock); 583 spin_lock_irq(&tree->lock);
580 node = &entry->rb_node; 584 node = &entry->rb_node;
581 rb_erase(node, &tree->tree); 585 rb_erase(node, &tree->tree);
586 RB_CLEAR_NODE(node);
582 if (tree->last == node) 587 if (tree->last == node)
583 tree->last = NULL; 588 tree->last = NULL;
584 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); 589 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index d5f1f033b7a0..8a8202956576 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1349,6 +1349,11 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1349 struct btrfs_root *quota_root; 1349 struct btrfs_root *quota_root;
1350 struct btrfs_qgroup *qgroup; 1350 struct btrfs_qgroup *qgroup;
1351 int ret = 0; 1351 int ret = 0;
1352 /* Sometimes we would want to clear the limit on this qgroup.
1353 * To meet this requirement, we treat the -1 as a special value
 1354 * which tells the kernel to clear the limit on this qgroup.
1355 */
1356 const u64 CLEAR_VALUE = -1;
1352 1357
1353 mutex_lock(&fs_info->qgroup_ioctl_lock); 1358 mutex_lock(&fs_info->qgroup_ioctl_lock);
1354 quota_root = fs_info->quota_root; 1359 quota_root = fs_info->quota_root;
@@ -1364,14 +1369,42 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
1364 } 1369 }
1365 1370
1366 spin_lock(&fs_info->qgroup_lock); 1371 spin_lock(&fs_info->qgroup_lock);
1367 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) 1372 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1368 qgroup->max_rfer = limit->max_rfer; 1373 if (limit->max_rfer == CLEAR_VALUE) {
1369 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) 1374 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1370 qgroup->max_excl = limit->max_excl; 1375 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1371 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) 1376 qgroup->max_rfer = 0;
1372 qgroup->rsv_rfer = limit->rsv_rfer; 1377 } else {
1373 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) 1378 qgroup->max_rfer = limit->max_rfer;
1374 qgroup->rsv_excl = limit->rsv_excl; 1379 }
1380 }
1381 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1382 if (limit->max_excl == CLEAR_VALUE) {
1383 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1384 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1385 qgroup->max_excl = 0;
1386 } else {
1387 qgroup->max_excl = limit->max_excl;
1388 }
1389 }
1390 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1391 if (limit->rsv_rfer == CLEAR_VALUE) {
1392 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1393 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1394 qgroup->rsv_rfer = 0;
1395 } else {
1396 qgroup->rsv_rfer = limit->rsv_rfer;
1397 }
1398 }
1399 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1400 if (limit->rsv_excl == CLEAR_VALUE) {
1401 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1402 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1403 qgroup->rsv_excl = 0;
1404 } else {
1405 qgroup->rsv_excl = limit->rsv_excl;
1406 }
1407 }
1375 qgroup->lim_flags |= limit->flags; 1408 qgroup->lim_flags |= limit->flags;
1376 1409
1377 spin_unlock(&fs_info->qgroup_lock); 1410 spin_unlock(&fs_info->qgroup_lock);
@@ -1618,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1618 /* Exclusive -> exclusive, nothing changed */ 1651 /* Exclusive -> exclusive, nothing changed */
1619 } 1652 }
1620 } 1653 }
1654
1655 /* For exclusive extent, free its reserved bytes too */
1656 if (nr_old_roots == 0 && nr_new_roots == 1 &&
1657 cur_new_count == nr_new_roots)
1658 qg->reserved -= num_bytes;
1621 if (dirty) 1659 if (dirty)
1622 qgroup_dirty(fs_info, qg); 1660 qgroup_dirty(fs_info, qg);
1623 } 1661 }
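The qgroup.c hunk above teaches btrfs_limit_qgroup() to treat a value of -1 (all bits set) as "clear this limit": the matching flag is dropped and the stored limit reset to 0 instead of being taken literally as U64_MAX. A compact sketch of that sentinel handling for a single field; the flag name and struct are illustrative, and the real patch repeats the same branch for the three other limits.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LIMIT_MAX_RFER	(1u << 0)

struct qgroup_like {
	unsigned int lim_flags;
	uint64_t max_rfer;
};

/* (uint64_t)-1 means "clear the limit" rather than "set it to the maximum" */
static void apply_max_rfer(struct qgroup_like *qg, unsigned int flags,
			   uint64_t max_rfer)
{
	if (!(flags & LIMIT_MAX_RFER))
		return;
	if (max_rfer == (uint64_t)-1) {
		qg->lim_flags &= ~LIMIT_MAX_RFER;
		qg->max_rfer = 0;
	} else {
		qg->lim_flags |= LIMIT_MAX_RFER;
		qg->max_rfer = max_rfer;
	}
}

int main(void)
{
	struct qgroup_like qg = { 0, 0 };

	apply_max_rfer(&qg, LIMIT_MAX_RFER, 1 << 20);		/* set a 1MiB cap */
	printf("flags=%#x max_rfer=%" PRIu64 "\n", qg.lim_flags, qg.max_rfer);

	apply_max_rfer(&qg, LIMIT_MAX_RFER, (uint64_t)-1);	/* clear it again */
	printf("flags=%#x max_rfer=%" PRIu64 "\n", qg.lim_flags, qg.max_rfer);
	return 0;
}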
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 827951fbf7fc..88cbb5995667 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4049,7 +4049,7 @@ restart:
4049 if (trans && progress && err == -ENOSPC) { 4049 if (trans && progress && err == -ENOSPC) {
4050 ret = btrfs_force_chunk_alloc(trans, rc->extent_root, 4050 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
4051 rc->block_group->flags); 4051 rc->block_group->flags);
4052 if (ret == 0) { 4052 if (ret == 1) {
4053 err = 0; 4053 err = 0;
4054 progress = 0; 4054 progress = 0;
4055 goto restart; 4055 goto restart;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9f2feabe99f2..94db0fa5225a 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3571,7 +3571,6 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3571static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info, 3571static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3572 int is_dev_replace) 3572 int is_dev_replace)
3573{ 3573{
3574 int ret = 0;
3575 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND; 3574 unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
3576 int max_active = fs_info->thread_pool_size; 3575 int max_active = fs_info->thread_pool_size;
3577 3576
@@ -3584,34 +3583,36 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3584 fs_info->scrub_workers = 3583 fs_info->scrub_workers =
3585 btrfs_alloc_workqueue("btrfs-scrub", flags, 3584 btrfs_alloc_workqueue("btrfs-scrub", flags,
3586 max_active, 4); 3585 max_active, 4);
3587 if (!fs_info->scrub_workers) { 3586 if (!fs_info->scrub_workers)
3588 ret = -ENOMEM; 3587 goto fail_scrub_workers;
3589 goto out; 3588
3590 }
3591 fs_info->scrub_wr_completion_workers = 3589 fs_info->scrub_wr_completion_workers =
3592 btrfs_alloc_workqueue("btrfs-scrubwrc", flags, 3590 btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3593 max_active, 2); 3591 max_active, 2);
3594 if (!fs_info->scrub_wr_completion_workers) { 3592 if (!fs_info->scrub_wr_completion_workers)
3595 ret = -ENOMEM; 3593 goto fail_scrub_wr_completion_workers;
3596 goto out; 3594
3597 }
3598 fs_info->scrub_nocow_workers = 3595 fs_info->scrub_nocow_workers =
3599 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0); 3596 btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3600 if (!fs_info->scrub_nocow_workers) { 3597 if (!fs_info->scrub_nocow_workers)
3601 ret = -ENOMEM; 3598 goto fail_scrub_nocow_workers;
3602 goto out;
3603 }
3604 fs_info->scrub_parity_workers = 3599 fs_info->scrub_parity_workers =
3605 btrfs_alloc_workqueue("btrfs-scrubparity", flags, 3600 btrfs_alloc_workqueue("btrfs-scrubparity", flags,
3606 max_active, 2); 3601 max_active, 2);
3607 if (!fs_info->scrub_parity_workers) { 3602 if (!fs_info->scrub_parity_workers)
3608 ret = -ENOMEM; 3603 goto fail_scrub_parity_workers;
3609 goto out;
3610 }
3611 } 3604 }
3612 ++fs_info->scrub_workers_refcnt; 3605 ++fs_info->scrub_workers_refcnt;
3613out: 3606 return 0;
3614 return ret; 3607
3608fail_scrub_parity_workers:
3609 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3610fail_scrub_nocow_workers:
3611 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3612fail_scrub_wr_completion_workers:
3613 btrfs_destroy_workqueue(fs_info->scrub_workers);
3614fail_scrub_workers:
3615 return -ENOMEM;
3615} 3616}
3616 3617
3617static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info) 3618static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
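The scrub.c change above converts scrub_workers_get() from a single "ret = -ENOMEM; goto out" exit into the usual unwind ladder, where each failure label tears down exactly the resources allocated before it. A generic userspace sketch of that ladder, with plain allocations standing in for the workqueues (the fail_* labels only mirror the diff's naming):

#include <stdio.h>
#include <stdlib.h>

static int setup_workers(void)
{
	void *scrub, *wr_completion, *nocow, *parity;

	scrub = malloc(64);
	if (!scrub)
		goto fail_scrub;
	wr_completion = malloc(64);
	if (!wr_completion)
		goto fail_wr_completion;
	nocow = malloc(64);
	if (!nocow)
		goto fail_nocow;
	parity = malloc(64);
	if (!parity)
		goto fail_parity;

	/* ... the four "workqueues" would be used here ... */
	free(parity);
	free(nocow);
	free(wr_completion);
	free(scrub);
	return 0;

fail_parity:
	free(nocow);
fail_nocow:
	free(wr_completion);
fail_wr_completion:
	free(scrub);
fail_scrub:
	return -1;	/* -ENOMEM in the kernel version */
}

int main(void)
{
	printf("setup_workers() returned %d\n", setup_workers());
	return 0;
}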
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c0f18e7266b6..f5021fcb154e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -761,7 +761,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
761 761
762 if (!list_empty(&trans->ordered)) { 762 if (!list_empty(&trans->ordered)) {
763 spin_lock(&info->trans_lock); 763 spin_lock(&info->trans_lock);
764 list_splice(&trans->ordered, &cur_trans->pending_ordered); 764 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
765 spin_unlock(&info->trans_lock); 765 spin_unlock(&info->trans_lock);
766 } 766 }
767 767
@@ -1866,7 +1866,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1866 } 1866 }
1867 1867
1868 spin_lock(&root->fs_info->trans_lock); 1868 spin_lock(&root->fs_info->trans_lock);
1869 list_splice(&trans->ordered, &cur_trans->pending_ordered); 1869 list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
1870 if (cur_trans->state >= TRANS_STATE_COMMIT_START) { 1870 if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1871 spin_unlock(&root->fs_info->trans_lock); 1871 spin_unlock(&root->fs_info->trans_lock);
1872 atomic_inc(&cur_trans->use_count); 1872 atomic_inc(&cur_trans->use_count);
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2152 2152
2153 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2153 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2154 2154
2155 if (current != root->fs_info->transaction_kthread) 2155 if (current != root->fs_info->transaction_kthread &&
2156 current != root->fs_info->cleaner_kthread)
2156 btrfs_run_delayed_iputs(root); 2157 btrfs_run_delayed_iputs(root);
2157 2158
2158 return ret; 2159 return ret;
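Both transaction.c hunks replace list_splice() with list_splice_init(). The difference matters when the source list head can be looked at again: a plain splice leaves the source head still pointing into the entries that were just moved, while the _init variant re-initializes it to an empty list. A self-contained sketch with a minimal circular list mirroring the usual list_head semantics (the list names only echo the diff):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list(struct list_head *l) { l->next = l; l->prev = l; }
static int  list_empty(const struct list_head *l) { return l->next == l; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* move all entries of @list to the front of @head; @list is left dangling */
static void list_splice(struct list_head *list, struct list_head *head)
{
	if (list_empty(list))
		return;
	list->next->prev = head;
	list->prev->next = head->next;
	head->next->prev = list->prev;
	head->next = list->next;
}

/* same, but re-initialize @list so it is safely empty afterwards */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
	list_splice(list, head);
	init_list(list);
}

int main(void)
{
	struct list_head ordered, pending_ordered, node;

	init_list(&ordered);
	init_list(&pending_ordered);
	init_list(&node);
	list_add_tail(&node, &ordered);

	list_splice_init(&ordered, &pending_ordered);
	printf("ordered empty after splice_init: %d\n", list_empty(&ordered));	/* 1 */
	printf("pending_ordered empty: %d\n", list_empty(&pending_ordered));	/* 0 */
	return 0;
}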
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1ce80c1c4eb6..9c45431e69ab 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4117,6 +4117,187 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
4117 return 0; 4117 return 0;
4118} 4118}
4119 4119
4120/*
4121 * At the moment we always log all xattrs. This is to figure out at log replay
4122 * time which xattrs must have their deletion replayed. If a xattr is missing
4123 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4124 * because if a xattr is deleted, the inode is fsynced and a power failure
4125 * happens, causing the log to be replayed the next time the fs is mounted,
4126 * we want the xattr to not exist anymore (same behaviour as other filesystems
4127 * with a journal, ext3/4, xfs, f2fs, etc).
4128 */
4129static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4130 struct btrfs_root *root,
4131 struct inode *inode,
4132 struct btrfs_path *path,
4133 struct btrfs_path *dst_path)
4134{
4135 int ret;
4136 struct btrfs_key key;
4137 const u64 ino = btrfs_ino(inode);
4138 int ins_nr = 0;
4139 int start_slot = 0;
4140
4141 key.objectid = ino;
4142 key.type = BTRFS_XATTR_ITEM_KEY;
4143 key.offset = 0;
4144
4145 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4146 if (ret < 0)
4147 return ret;
4148
4149 while (true) {
4150 int slot = path->slots[0];
4151 struct extent_buffer *leaf = path->nodes[0];
4152 int nritems = btrfs_header_nritems(leaf);
4153
4154 if (slot >= nritems) {
4155 if (ins_nr > 0) {
4156 u64 last_extent = 0;
4157
4158 ret = copy_items(trans, inode, dst_path, path,
4159 &last_extent, start_slot,
4160 ins_nr, 1, 0);
4161 /* can't be 1, extent items aren't processed */
4162 ASSERT(ret <= 0);
4163 if (ret < 0)
4164 return ret;
4165 ins_nr = 0;
4166 }
4167 ret = btrfs_next_leaf(root, path);
4168 if (ret < 0)
4169 return ret;
4170 else if (ret > 0)
4171 break;
4172 continue;
4173 }
4174
4175 btrfs_item_key_to_cpu(leaf, &key, slot);
4176 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4177 break;
4178
4179 if (ins_nr == 0)
4180 start_slot = slot;
4181 ins_nr++;
4182 path->slots[0]++;
4183 cond_resched();
4184 }
4185 if (ins_nr > 0) {
4186 u64 last_extent = 0;
4187
4188 ret = copy_items(trans, inode, dst_path, path,
4189 &last_extent, start_slot,
4190 ins_nr, 1, 0);
4191 /* can't be 1, extent items aren't processed */
4192 ASSERT(ret <= 0);
4193 if (ret < 0)
4194 return ret;
4195 }
4196
4197 return 0;
4198}
4199
4200/*
4201 * If the no holes feature is enabled we need to make sure any hole between the
4202 * last extent and the i_size of our inode is explicitly marked in the log. This
4203 * is to make sure that doing something like:
4204 *
4205 * 1) create file with 128Kb of data
4206 * 2) truncate file to 64Kb
4207 * 3) truncate file to 256Kb
4208 * 4) fsync file
4209 * 5) <crash/power failure>
4210 * 6) mount fs and trigger log replay
4211 *
4212 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4213 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4214 * file correspond to a hole. The presence of explicit holes in a log tree is
4215 * what guarantees that log replay will remove/adjust file extent items in the
4216 * fs/subvol tree.
4217 *
4218 * Here we do not need to care about holes between extents, that is already done
4219 * by copy_items(). We also only need to do this in the full sync path, where we
4220 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4221 * lookup the list of modified extent maps and if any represents a hole, we
4222 * insert a corresponding extent representing a hole in the log tree.
4223 */
4224static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4225 struct btrfs_root *root,
4226 struct inode *inode,
4227 struct btrfs_path *path)
4228{
4229 int ret;
4230 struct btrfs_key key;
4231 u64 hole_start;
4232 u64 hole_size;
4233 struct extent_buffer *leaf;
4234 struct btrfs_root *log = root->log_root;
4235 const u64 ino = btrfs_ino(inode);
4236 const u64 i_size = i_size_read(inode);
4237
4238 if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
4239 return 0;
4240
4241 key.objectid = ino;
4242 key.type = BTRFS_EXTENT_DATA_KEY;
4243 key.offset = (u64)-1;
4244
4245 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4246 ASSERT(ret != 0);
4247 if (ret < 0)
4248 return ret;
4249
4250 ASSERT(path->slots[0] > 0);
4251 path->slots[0]--;
4252 leaf = path->nodes[0];
4253 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4254
4255 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4256 /* inode does not have any extents */
4257 hole_start = 0;
4258 hole_size = i_size;
4259 } else {
4260 struct btrfs_file_extent_item *extent;
4261 u64 len;
4262
4263 /*
4264 * If there's an extent beyond i_size, an explicit hole was
4265 * already inserted by copy_items().
4266 */
4267 if (key.offset >= i_size)
4268 return 0;
4269
4270 extent = btrfs_item_ptr(leaf, path->slots[0],
4271 struct btrfs_file_extent_item);
4272
4273 if (btrfs_file_extent_type(leaf, extent) ==
4274 BTRFS_FILE_EXTENT_INLINE) {
4275 len = btrfs_file_extent_inline_len(leaf,
4276 path->slots[0],
4277 extent);
4278 ASSERT(len == i_size);
4279 return 0;
4280 }
4281
4282 len = btrfs_file_extent_num_bytes(leaf, extent);
4283 /* Last extent goes beyond i_size, no need to log a hole. */
4284 if (key.offset + len > i_size)
4285 return 0;
4286 hole_start = key.offset + len;
4287 hole_size = i_size - hole_start;
4288 }
4289 btrfs_release_path(path);
4290
4291 /* Last extent ends at i_size. */
4292 if (hole_size == 0)
4293 return 0;
4294
4295 hole_size = ALIGN(hole_size, root->sectorsize);
4296 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4297 hole_size, 0, hole_size, 0, 0, 0);
4298 return ret;
4299}
4300
4120/* log a single inode in the tree log. 4301/* log a single inode in the tree log.
4121 * At least one parent directory for this inode must exist in the tree 4302 * At least one parent directory for this inode must exist in the tree
4122 * or be logged already. 4303 * or be logged already.
@@ -4155,6 +4336,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4155 u64 ino = btrfs_ino(inode); 4336 u64 ino = btrfs_ino(inode);
4156 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 4337 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4157 u64 logged_isize = 0; 4338 u64 logged_isize = 0;
4339 bool need_log_inode_item = true;
4158 4340
4159 path = btrfs_alloc_path(); 4341 path = btrfs_alloc_path();
4160 if (!path) 4342 if (!path)
@@ -4263,11 +4445,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4263 } else { 4445 } else {
4264 if (inode_only == LOG_INODE_ALL) 4446 if (inode_only == LOG_INODE_ALL)
4265 fast_search = true; 4447 fast_search = true;
4266 ret = log_inode_item(trans, log, dst_path, inode);
4267 if (ret) {
4268 err = ret;
4269 goto out_unlock;
4270 }
4271 goto log_extents; 4448 goto log_extents;
4272 } 4449 }
4273 4450
@@ -4290,6 +4467,28 @@ again:
4290 if (min_key.type > max_key.type) 4467 if (min_key.type > max_key.type)
4291 break; 4468 break;
4292 4469
4470 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4471 need_log_inode_item = false;
4472
4473 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4474 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4475 if (ins_nr == 0)
4476 goto next_slot;
4477 ret = copy_items(trans, inode, dst_path, path,
4478 &last_extent, ins_start_slot,
4479 ins_nr, inode_only, logged_isize);
4480 if (ret < 0) {
4481 err = ret;
4482 goto out_unlock;
4483 }
4484 ins_nr = 0;
4485 if (ret) {
4486 btrfs_release_path(path);
4487 continue;
4488 }
4489 goto next_slot;
4490 }
4491
4293 src = path->nodes[0]; 4492 src = path->nodes[0];
4294 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 4493 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4295 ins_nr++; 4494 ins_nr++;
@@ -4357,9 +4556,26 @@ next_slot:
4357 ins_nr = 0; 4556 ins_nr = 0;
4358 } 4557 }
4359 4558
4559 btrfs_release_path(path);
4560 btrfs_release_path(dst_path);
4561 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
4562 if (err)
4563 goto out_unlock;
4564 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
4565 btrfs_release_path(path);
4566 btrfs_release_path(dst_path);
4567 err = btrfs_log_trailing_hole(trans, root, inode, path);
4568 if (err)
4569 goto out_unlock;
4570 }
4360log_extents: 4571log_extents:
4361 btrfs_release_path(path); 4572 btrfs_release_path(path);
4362 btrfs_release_path(dst_path); 4573 btrfs_release_path(dst_path);
4574 if (need_log_inode_item) {
4575 err = log_inode_item(trans, log, dst_path, inode);
4576 if (err)
4577 goto out_unlock;
4578 }
4363 if (fast_search) { 4579 if (fast_search) {
4364 /* 4580 /*
4365 * Some ordered extents started by fsync might have completed 4581 * Some ordered extents started by fsync might have completed
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 4b438b4c8c91..fbe7c104531c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2766,6 +2766,20 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
2766 root = root->fs_info->chunk_root; 2766 root = root->fs_info->chunk_root;
2767 extent_root = root->fs_info->extent_root; 2767 extent_root = root->fs_info->extent_root;
2768 2768
2769 /*
2770 * Prevent races with automatic removal of unused block groups.
2771 * After we relocate and before we remove the chunk with offset
2772 * chunk_offset, automatic removal of the block group can kick in,
2773 * resulting in a failure when calling btrfs_remove_chunk() below.
2774 *
2775 * Make sure to acquire this mutex before doing a tree search (dev
2776 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
2777 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
2778 * we release the path used to search the chunk/dev tree and before
2779 * the current task acquires this mutex and calls us.
2780 */
2781 ASSERT(mutex_is_locked(&root->fs_info->delete_unused_bgs_mutex));
2782
2769 ret = btrfs_can_relocate(extent_root, chunk_offset); 2783 ret = btrfs_can_relocate(extent_root, chunk_offset);
2770 if (ret) 2784 if (ret)
2771 return -ENOSPC; 2785 return -ENOSPC;
@@ -2814,13 +2828,18 @@ again:
2814 key.type = BTRFS_CHUNK_ITEM_KEY; 2828 key.type = BTRFS_CHUNK_ITEM_KEY;
2815 2829
2816 while (1) { 2830 while (1) {
2831 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
2817 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 2832 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2818 if (ret < 0) 2833 if (ret < 0) {
2834 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2819 goto error; 2835 goto error;
2836 }
2820 BUG_ON(ret == 0); /* Corruption */ 2837 BUG_ON(ret == 0); /* Corruption */
2821 2838
2822 ret = btrfs_previous_item(chunk_root, path, key.objectid, 2839 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2823 key.type); 2840 key.type);
2841 if (ret)
2842 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2824 if (ret < 0) 2843 if (ret < 0)
2825 goto error; 2844 goto error;
2826 if (ret > 0) 2845 if (ret > 0)
@@ -2843,6 +2862,7 @@ again:
2843 else 2862 else
2844 BUG_ON(ret); 2863 BUG_ON(ret);
2845 } 2864 }
2865 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
2846 2866
2847 if (found_key.offset == 0) 2867 if (found_key.offset == 0)
2848 break; 2868 break;
@@ -3299,9 +3319,12 @@ again:
3299 goto error; 3319 goto error;
3300 } 3320 }
3301 3321
3322 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3302 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0); 3323 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3303 if (ret < 0) 3324 if (ret < 0) {
3325 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3304 goto error; 3326 goto error;
3327 }
3305 3328
3306 /* 3329 /*
3307 * this shouldn't happen, it means the last relocate 3330 * this shouldn't happen, it means the last relocate
@@ -3313,6 +3336,7 @@ again:
3313 ret = btrfs_previous_item(chunk_root, path, 0, 3336 ret = btrfs_previous_item(chunk_root, path, 0,
3314 BTRFS_CHUNK_ITEM_KEY); 3337 BTRFS_CHUNK_ITEM_KEY);
3315 if (ret) { 3338 if (ret) {
3339 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3316 ret = 0; 3340 ret = 0;
3317 break; 3341 break;
3318 } 3342 }
@@ -3321,8 +3345,10 @@ again:
3321 slot = path->slots[0]; 3345 slot = path->slots[0];
3322 btrfs_item_key_to_cpu(leaf, &found_key, slot); 3346 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3323 3347
3324 if (found_key.objectid != key.objectid) 3348 if (found_key.objectid != key.objectid) {
3349 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3325 break; 3350 break;
3351 }
3326 3352
3327 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); 3353 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3328 3354
@@ -3335,10 +3361,13 @@ again:
3335 ret = should_balance_chunk(chunk_root, leaf, chunk, 3361 ret = should_balance_chunk(chunk_root, leaf, chunk,
3336 found_key.offset); 3362 found_key.offset);
3337 btrfs_release_path(path); 3363 btrfs_release_path(path);
3338 if (!ret) 3364 if (!ret) {
3365 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3339 goto loop; 3366 goto loop;
3367 }
3340 3368
3341 if (counting) { 3369 if (counting) {
3370 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3342 spin_lock(&fs_info->balance_lock); 3371 spin_lock(&fs_info->balance_lock);
3343 bctl->stat.expected++; 3372 bctl->stat.expected++;
3344 spin_unlock(&fs_info->balance_lock); 3373 spin_unlock(&fs_info->balance_lock);
@@ -3348,6 +3377,7 @@ again:
3348 ret = btrfs_relocate_chunk(chunk_root, 3377 ret = btrfs_relocate_chunk(chunk_root,
3349 found_key.objectid, 3378 found_key.objectid,
3350 found_key.offset); 3379 found_key.offset);
3380 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3351 if (ret && ret != -ENOSPC) 3381 if (ret && ret != -ENOSPC)
3352 goto error; 3382 goto error;
3353 if (ret == -ENOSPC) { 3383 if (ret == -ENOSPC) {
@@ -4087,11 +4117,16 @@ again:
4087 key.type = BTRFS_DEV_EXTENT_KEY; 4117 key.type = BTRFS_DEV_EXTENT_KEY;
4088 4118
4089 do { 4119 do {
4120 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
4090 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4121 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4091 if (ret < 0) 4122 if (ret < 0) {
4123 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4092 goto done; 4124 goto done;
4125 }
4093 4126
4094 ret = btrfs_previous_item(root, path, 0, key.type); 4127 ret = btrfs_previous_item(root, path, 0, key.type);
4128 if (ret)
4129 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4095 if (ret < 0) 4130 if (ret < 0)
4096 goto done; 4131 goto done;
4097 if (ret) { 4132 if (ret) {
@@ -4105,6 +4140,7 @@ again:
4105 btrfs_item_key_to_cpu(l, &key, path->slots[0]); 4140 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4106 4141
4107 if (key.objectid != device->devid) { 4142 if (key.objectid != device->devid) {
4143 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4108 btrfs_release_path(path); 4144 btrfs_release_path(path);
4109 break; 4145 break;
4110 } 4146 }
@@ -4113,6 +4149,7 @@ again:
4113 length = btrfs_dev_extent_length(l, dev_extent); 4149 length = btrfs_dev_extent_length(l, dev_extent);
4114 4150
4115 if (key.offset + length <= new_size) { 4151 if (key.offset + length <= new_size) {
4152 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4116 btrfs_release_path(path); 4153 btrfs_release_path(path);
4117 break; 4154 break;
4118 } 4155 }
@@ -4122,6 +4159,7 @@ again:
4122 btrfs_release_path(path); 4159 btrfs_release_path(path);
4123 4160
4124 ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset); 4161 ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
4162 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
4125 if (ret && ret != -ENOSPC) 4163 if (ret && ret != -ENOSPC)
4126 goto done; 4164 goto done;
4127 if (ret == -ENOSPC) 4165 if (ret == -ENOSPC)
@@ -5715,7 +5753,6 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e
5715static void btrfs_end_bio(struct bio *bio, int err) 5753static void btrfs_end_bio(struct bio *bio, int err)
5716{ 5754{
5717 struct btrfs_bio *bbio = bio->bi_private; 5755 struct btrfs_bio *bbio = bio->bi_private;
5718 struct btrfs_device *dev = bbio->stripes[0].dev;
5719 int is_orig_bio = 0; 5756 int is_orig_bio = 0;
5720 5757
5721 if (err) { 5758 if (err) {
@@ -5723,6 +5760,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
5723 if (err == -EIO || err == -EREMOTEIO) { 5760 if (err == -EIO || err == -EREMOTEIO) {
5724 unsigned int stripe_index = 5761 unsigned int stripe_index =
5725 btrfs_io_bio(bio)->stripe_index; 5762 btrfs_io_bio(bio)->stripe_index;
5763 struct btrfs_device *dev;
5726 5764
5727 BUG_ON(stripe_index >= bbio->num_stripes); 5765 BUG_ON(stripe_index >= bbio->num_stripes);
5728 dev = bbio->stripes[stripe_index].dev; 5766 dev = bbio->stripes[stripe_index].dev;
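The btrfs/volumes.c hunks above all follow one protocol: take fs_info->delete_unused_bgs_mutex before searching the chunk or device tree, keep holding it through btrfs_relocate_chunk(), and only then release it, so the cleaner kthread's btrfs_delete_unused_bgs() cannot remove the block group between the lookup and the relocation. The following is a hedged userspace analogy of that lookup-then-act-under-one-lock pattern, not btrfs code; every name in it (chunk_present, cleaner, relocate_chunk) is invented.

/* Hedged analogy: hold one mutex across "search" and "act" so a
 * concurrent cleaner cannot free the entry in between. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NCHUNKS 4

static pthread_mutex_t delete_unused_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool chunk_present[NCHUNKS] = { true, true, true, true };

/* Stand-in for btrfs_delete_unused_bgs(): may drop a chunk at any time. */
static void *cleaner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&delete_unused_mutex);
	chunk_present[2] = false;		/* remove an "unused" chunk */
	pthread_mutex_unlock(&delete_unused_mutex);
	return NULL;
}

/* Stand-in for the balance/shrink loops: search and relocate inside the
 * same critical section, so the cleaner cannot race in between. */
static void relocate_chunk(int idx)
{
	pthread_mutex_lock(&delete_unused_mutex);
	if (chunk_present[idx]) {		/* "tree search" */
		printf("relocating chunk %d\n", idx);
		chunk_present[idx] = false;	/* "btrfs_relocate_chunk()" */
	}
	pthread_mutex_unlock(&delete_unused_mutex);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, cleaner, NULL);
	for (int i = 0; i < NCHUNKS; i++)
		relocate_chunk(i);
	pthread_join(t, NULL);
	return 0;
}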
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dc10c9dd36c1..ddd5e9471290 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
1506 1506
1507 swap(cf, ci->i_prealloc_cap_flush); 1507 swap(cf, ci->i_prealloc_cap_flush);
1508 cf->caps = flushing; 1508 cf->caps = flushing;
1509 cf->kick = false;
1510 1509
1511 spin_lock(&mdsc->cap_dirty_lock); 1510 spin_lock(&mdsc->cap_dirty_lock);
1512 list_del_init(&ci->i_dirty_item); 1511 list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
2123 2122
2124static int __kick_flushing_caps(struct ceph_mds_client *mdsc, 2123static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2125 struct ceph_mds_session *session, 2124 struct ceph_mds_session *session,
2126 struct ceph_inode_info *ci, 2125 struct ceph_inode_info *ci)
2127 bool kick_all)
2128{ 2126{
2129 struct inode *inode = &ci->vfs_inode; 2127 struct inode *inode = &ci->vfs_inode;
2130 struct ceph_cap *cap; 2128 struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2150 2148
2151 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { 2149 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2152 cf = rb_entry(n, struct ceph_cap_flush, i_node); 2150 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2153 if (cf->tid < first_tid) 2151 if (cf->tid >= first_tid)
2154 continue;
2155 if (kick_all || cf->kick)
2156 break; 2152 break;
2157 } 2153 }
2158 if (!n) { 2154 if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2161 } 2157 }
2162 2158
2163 cf = rb_entry(n, struct ceph_cap_flush, i_node); 2159 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2164 cf->kick = false;
2165 2160
2166 first_tid = cf->tid + 1; 2161 first_tid = cf->tid + 1;
2167 2162
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2181{ 2176{
2182 struct ceph_inode_info *ci; 2177 struct ceph_inode_info *ci;
2183 struct ceph_cap *cap; 2178 struct ceph_cap *cap;
2184 struct ceph_cap_flush *cf;
2185 struct rb_node *n;
2186 2179
2187 dout("early_kick_flushing_caps mds%d\n", session->s_mds); 2180 dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2188 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2181 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2205 if ((cap->issued & ci->i_flushing_caps) != 2198 if ((cap->issued & ci->i_flushing_caps) !=
2206 ci->i_flushing_caps) { 2199 ci->i_flushing_caps) {
2207 spin_unlock(&ci->i_ceph_lock); 2200 spin_unlock(&ci->i_ceph_lock);
2208 if (!__kick_flushing_caps(mdsc, session, ci, true)) 2201 if (!__kick_flushing_caps(mdsc, session, ci))
2209 continue; 2202 continue;
2210 spin_lock(&ci->i_ceph_lock); 2203 spin_lock(&ci->i_ceph_lock);
2211 } 2204 }
2212 2205
2213 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2214 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2215 cf->kick = true;
2216 }
2217
2218 spin_unlock(&ci->i_ceph_lock); 2206 spin_unlock(&ci->i_ceph_lock);
2219 } 2207 }
2220} 2208}
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2228 2216
2229 dout("kick_flushing_caps mds%d\n", session->s_mds); 2217 dout("kick_flushing_caps mds%d\n", session->s_mds);
2230 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2218 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2231 int delayed = __kick_flushing_caps(mdsc, session, ci, false); 2219 int delayed = __kick_flushing_caps(mdsc, session, ci);
2232 if (delayed) { 2220 if (delayed) {
2233 spin_lock(&ci->i_ceph_lock); 2221 spin_lock(&ci->i_ceph_lock);
2234 __cap_delay_requeue(mdsc, ci); 2222 __cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2261 2249
2262 spin_unlock(&ci->i_ceph_lock); 2250 spin_unlock(&ci->i_ceph_lock);
2263 2251
2264 delayed = __kick_flushing_caps(mdsc, session, ci, true); 2252 delayed = __kick_flushing_caps(mdsc, session, ci);
2265 if (delayed) { 2253 if (delayed) {
2266 spin_lock(&ci->i_ceph_lock); 2254 spin_lock(&ci->i_ceph_lock);
2267 __cap_delay_requeue(mdsc, ci); 2255 __cap_delay_requeue(mdsc, ci);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 4347039ecc18..6706bde9ad1b 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
287 return 0; 287 return 0;
288 288
289 spin_lock(&ctx->flc_lock); 289 spin_lock(&ctx->flc_lock);
290 list_for_each_entry(lock, &ctx->flc_flock, fl_list) { 290 list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
291 ++seen_fcntl; 291 ++seen_fcntl;
292 if (seen_fcntl > num_fcntl_locks) { 292 if (seen_fcntl > num_fcntl_locks) {
293 err = -ENOSPC; 293 err = -ENOSPC;
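The one-line ceph fix above matters because the VFS keeps BSD flock() locks and POSIX fcntl() locks on separate per-inode lists (flc_flock vs flc_posix); the encoder was counting fcntl locks while walking the flock list. A minimal userspace illustration of the two independent lock families on one file (the path is an arbitrary example):

/* Both lock families can be held on the same file at once because the
 * kernel tracks them on separate lists (flc_flock vs flc_posix). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/lock-demo", O_RDWR | O_CREAT, 0600);
	if (fd < 0) { perror("open"); return 1; }

	if (flock(fd, LOCK_EX | LOCK_NB))	/* BSD flock() lock */
		perror("flock");

	struct flock fl = {
		.l_type = F_WRLCK,		/* POSIX fcntl() lock */
		.l_whence = SEEK_SET,
		.l_start = 0,
		.l_len = 0,			/* whole file */
	};
	if (fcntl(fd, F_SETLK, &fl))
		perror("fcntl");

	puts("holding one flock-style and one POSIX-style lock");
	close(fd);
	return 0;
}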
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 860cc016e70d..2f2460d23a06 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
189struct ceph_cap_flush { 189struct ceph_cap_flush {
190 u64 tid; 190 u64 tid;
191 int caps; 191 int caps;
192 bool kick;
193 struct rb_node g_node; // global 192 struct rb_node g_node; // global
194 union { 193 union {
195 struct rb_node i_node; // inode 194 struct rb_node i_node; // inode
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6b8e2f091f5b..48851f6ea6ec 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -896,6 +896,7 @@ COMPATIBLE_IOCTL(FIGETBSZ)
896/* 'X' - originally XFS but some now in the VFS */ 896/* 'X' - originally XFS but some now in the VFS */
897COMPATIBLE_IOCTL(FIFREEZE) 897COMPATIBLE_IOCTL(FIFREEZE)
898COMPATIBLE_IOCTL(FITHAW) 898COMPATIBLE_IOCTL(FITHAW)
899COMPATIBLE_IOCTL(FITRIM)
899COMPATIBLE_IOCTL(KDGETKEYCODE) 900COMPATIBLE_IOCTL(KDGETKEYCODE)
900COMPATIBLE_IOCTL(KDSETKEYCODE) 901COMPATIBLE_IOCTL(KDSETKEYCODE)
901COMPATIBLE_IOCTL(KDGKBTYPE) 902COMPATIBLE_IOCTL(KDGKBTYPE)
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index 4d6a30e76168..b863a09cd2f1 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -115,7 +115,7 @@ void config_item_init_type_name(struct config_item *item,
115 const char *name, 115 const char *name,
116 struct config_item_type *type) 116 struct config_item_type *type)
117{ 117{
118 config_item_set_name(item, name); 118 config_item_set_name(item, "%s", name);
119 item->ci_type = type; 119 item->ci_type = type;
120 config_item_init(item); 120 config_item_init(item);
121} 121}
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
124void config_group_init_type_name(struct config_group *group, const char *name, 124void config_group_init_type_name(struct config_group *group, const char *name,
125 struct config_item_type *type) 125 struct config_item_type *type)
126{ 126{
127 config_item_set_name(&group->cg_item, name); 127 config_item_set_name(&group->cg_item, "%s", name);
128 group->cg_item.ci_type = type; 128 group->cg_item.ci_type = type;
129 config_group_init(group); 129 config_group_init(group);
130} 130}
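The configfs change routes the caller-supplied name through a literal "%s" so that any '%' in the name is treated as data rather than as a conversion specification. The same class of bug in plain userspace C, as a standalone illustration:

/* Passing caller data as the format string misinterprets '%' sequences;
 * funnel it through a fixed "%s" format instead. */
#include <stdio.h>

int main(void)
{
	const char *name = "eth0%d_stats";	/* user-controlled text */

	/* printf(name) would try to consume a nonexistent int argument. */
	printf("%s\n", name);			/* safe: name is plain data */
	return 0;
}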
diff --git a/fs/dax.c b/fs/dax.c
index c3e21ccfc358..a7f77e1fa18c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
319 * @vma: The virtual memory area where the fault occurred 319 * @vma: The virtual memory area where the fault occurred
320 * @vmf: The description of the fault 320 * @vmf: The description of the fault
321 * @get_block: The filesystem method used to translate file offsets to blocks 321 * @get_block: The filesystem method used to translate file offsets to blocks
322 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 323 * to written so the data written to them is exposed. This is required
 324 * by write faults for filesystems that will return unwritten
325 * extent mappings from @get_block, but it is optional for reads as
326 * dax_insert_mapping() will always zero unwritten blocks. If the fs does
 327 * not support unwritten extents, then it should pass NULL.
322 * 328 *
323 * When a page fault occurs, filesystems may call this helper in their 329 * When a page fault occurs, filesystems may call this helper in their
324 * fault handler for DAX files. __dax_fault() assumes the caller has done all 330 * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
437 * as for normal BH based IO completions. 443 * as for normal BH based IO completions.
438 */ 444 */
439 error = dax_insert_mapping(inode, &bh, vma, vmf); 445 error = dax_insert_mapping(inode, &bh, vma, vmf);
440 if (buffer_unwritten(&bh)) 446 if (buffer_unwritten(&bh)) {
441 complete_unwritten(&bh, !error); 447 if (complete_unwritten)
448 complete_unwritten(&bh, !error);
449 else
450 WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
451 }
442 452
443 out: 453 out:
444 if (error == -ENOMEM) 454 if (error == -ENOMEM)
diff --git a/fs/dcache.c b/fs/dcache.c
index 7a3f3e5f9cea..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -642,7 +642,7 @@ static inline bool fast_dput(struct dentry *dentry)
642 642
643 /* 643 /*
 644 * If we have a d_op->d_delete() operation, we should not 644 * If we have a d_op->d_delete() operation, we should not
645 * let the dentry count go to zero, so use "put__or_lock". 645 * let the dentry count go to zero, so use "put_or_lock".
646 */ 646 */
647 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) 647 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
648 return lockref_put_or_lock(&dentry->d_lockref); 648 return lockref_put_or_lock(&dentry->d_lockref);
@@ -697,7 +697,7 @@ static inline bool fast_dput(struct dentry *dentry)
697 */ 697 */
698 smp_rmb(); 698 smp_rmb();
699 d_flags = ACCESS_ONCE(dentry->d_flags); 699 d_flags = ACCESS_ONCE(dentry->d_flags);
700 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST; 700 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
701 701
702 /* Nothing to do? Dropping the reference was all we needed? */ 702 /* Nothing to do? Dropping the reference was all we needed? */
703 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) 703 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
@@ -776,6 +776,9 @@ repeat:
776 if (unlikely(d_unhashed(dentry))) 776 if (unlikely(d_unhashed(dentry)))
777 goto kill_it; 777 goto kill_it;
778 778
779 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
780 goto kill_it;
781
779 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) { 782 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
780 if (dentry->d_op->d_delete(dentry)) 783 if (dentry->d_op->d_delete(dentry))
781 goto kill_it; 784 goto kill_it;
@@ -3439,22 +3442,15 @@ void __init vfs_caches_init_early(void)
3439 inode_init_early(); 3442 inode_init_early();
3440} 3443}
3441 3444
3442void __init vfs_caches_init(unsigned long mempages) 3445void __init vfs_caches_init(void)
3443{ 3446{
3444 unsigned long reserve;
3445
3446 /* Base hash sizes on available memory, with a reserve equal to
3447 150% of current kernel size */
3448
3449 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3450 mempages -= reserve;
3451
3452 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3447 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3453 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3448 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3454 3449
3455 dcache_init(); 3450 dcache_init();
3456 inode_init(); 3451 inode_init();
3457 files_init(mempages); 3452 files_init();
3453 files_maxfiles_init();
3458 mnt_init(); 3454 mnt_init();
3459 bdev_cache_init(); 3455 bdev_cache_init();
3460 chrdev_init(); 3456 chrdev_init();
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 72afcc629d7b..feef8a9c4de7 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -325,7 +325,6 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
325 return rc; 325 return rc;
326 326
327 switch (cmd) { 327 switch (cmd) {
328 case FITRIM:
329 case FS_IOC32_GETFLAGS: 328 case FS_IOC32_GETFLAGS:
330 case FS_IOC32_SETFLAGS: 329 case FS_IOC32_SETFLAGS:
331 case FS_IOC32_GETVERSION: 330 case FS_IOC32_GETVERSION:
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index aadb72828834..2553aa8b608d 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -504,7 +504,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
504 struct buffer_head *bh; 504 struct buffer_head *bh;
505 int err; 505 int err;
506 506
507 bh = sb_getblk(inode->i_sb, pblk); 507 bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
508 if (unlikely(!bh)) 508 if (unlikely(!bh))
509 return ERR_PTR(-ENOMEM); 509 return ERR_PTR(-ENOMEM);
510 510
@@ -1089,7 +1089,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
1089 err = -EIO; 1089 err = -EIO;
1090 goto cleanup; 1090 goto cleanup;
1091 } 1091 }
1092 bh = sb_getblk(inode->i_sb, newblock); 1092 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1093 if (unlikely(!bh)) { 1093 if (unlikely(!bh)) {
1094 err = -ENOMEM; 1094 err = -ENOMEM;
1095 goto cleanup; 1095 goto cleanup;
@@ -1283,7 +1283,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1283 if (newblock == 0) 1283 if (newblock == 0)
1284 return err; 1284 return err;
1285 1285
1286 bh = sb_getblk(inode->i_sb, newblock); 1286 bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
1287 if (unlikely(!bh)) 1287 if (unlikely(!bh))
1288 return -ENOMEM; 1288 return -ENOMEM;
1289 lock_buffer(bh); 1289 lock_buffer(bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 41f8e55afcd1..cecf9aa10811 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1323,7 +1323,7 @@ static void ext4_da_page_release_reservation(struct page *page,
1323 unsigned int offset, 1323 unsigned int offset,
1324 unsigned int length) 1324 unsigned int length)
1325{ 1325{
1326 int to_release = 0; 1326 int to_release = 0, contiguous_blks = 0;
1327 struct buffer_head *head, *bh; 1327 struct buffer_head *head, *bh;
1328 unsigned int curr_off = 0; 1328 unsigned int curr_off = 0;
1329 struct inode *inode = page->mapping->host; 1329 struct inode *inode = page->mapping->host;
@@ -1344,14 +1344,23 @@ static void ext4_da_page_release_reservation(struct page *page,
1344 1344
1345 if ((offset <= curr_off) && (buffer_delay(bh))) { 1345 if ((offset <= curr_off) && (buffer_delay(bh))) {
1346 to_release++; 1346 to_release++;
1347 contiguous_blks++;
1347 clear_buffer_delay(bh); 1348 clear_buffer_delay(bh);
1349 } else if (contiguous_blks) {
1350 lblk = page->index <<
1351 (PAGE_CACHE_SHIFT - inode->i_blkbits);
1352 lblk += (curr_off >> inode->i_blkbits) -
1353 contiguous_blks;
1354 ext4_es_remove_extent(inode, lblk, contiguous_blks);
1355 contiguous_blks = 0;
1348 } 1356 }
1349 curr_off = next_off; 1357 curr_off = next_off;
1350 } while ((bh = bh->b_this_page) != head); 1358 } while ((bh = bh->b_this_page) != head);
1351 1359
1352 if (to_release) { 1360 if (contiguous_blks) {
1353 lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); 1361 lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1354 ext4_es_remove_extent(inode, lblk, to_release); 1362 lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
1363 ext4_es_remove_extent(inode, lblk, contiguous_blks);
1355 } 1364 }
1356 1365
1357 /* If we have released all the blocks belonging to a cluster, then we 1366 /* If we have released all the blocks belonging to a cluster, then we
@@ -4344,7 +4353,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
4344 int inode_size = EXT4_INODE_SIZE(sb); 4353 int inode_size = EXT4_INODE_SIZE(sb);
4345 4354
4346 oi.orig_ino = orig_ino; 4355 oi.orig_ino = orig_ino;
4347 ino = (orig_ino & ~(inodes_per_block - 1)) + 1; 4356 /*
4357 * Calculate the first inode in the inode table block. Inode
4358 * numbers are one-based. That is, the first inode in a block
4359 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
4360 */
4361 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
4348 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { 4362 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
4349 if (ino == orig_ino) 4363 if (ino == orig_ino)
4350 continue; 4364 continue;
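The rounding fix above only shows for inodes whose number is an exact multiple of inodes_per_block: inode numbers are one-based, so the first inode of the containing block comes from rounding down (orig_ino - 1), not orig_ino. A small worked check, using 16 inodes per block purely as an example value:

/* For orig_ino = 32 with 16 inodes per block, the block holding it
 * starts at inode 17; the old formula incorrectly said 33. */
#include <stdio.h>

static unsigned long first_ino_old(unsigned long ino, unsigned long per_blk)
{
	return (ino & ~(per_blk - 1)) + 1;
}

static unsigned long first_ino_new(unsigned long ino, unsigned long per_blk)
{
	return ((ino - 1) & ~(per_blk - 1)) + 1;
}

int main(void)
{
	unsigned long per_blk = 16;	/* assumed: 4k blocks, 256-byte inodes */

	for (unsigned long ino = 31; ino <= 33; ino++)
		printf("ino %lu: old=%lu new=%lu\n", ino,
		       first_ino_old(ino, per_blk), first_ino_new(ino, per_blk));
	return 0;
}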
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index cb8451246b30..1346cfa355d0 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -755,7 +755,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
755 return err; 755 return err;
756 } 756 }
757 case EXT4_IOC_MOVE_EXT: 757 case EXT4_IOC_MOVE_EXT:
758 case FITRIM:
759 case EXT4_IOC_RESIZE_FS: 758 case EXT4_IOC_RESIZE_FS:
760 case EXT4_IOC_PRECACHE_EXTENTS: 759 case EXT4_IOC_PRECACHE_EXTENTS:
761 case EXT4_IOC_SET_ENCRYPTION_POLICY: 760 case EXT4_IOC_SET_ENCRYPTION_POLICY:
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f6aedf88da43..34b610ea5030 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4816,18 +4816,12 @@ do_more:
4816 /* 4816 /*
4817 * blocks being freed are metadata. these blocks shouldn't 4817 * blocks being freed are metadata. these blocks shouldn't
4818 * be used until this transaction is committed 4818 * be used until this transaction is committed
4819 *
4820 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
4821 * to fail.
4819 */ 4822 */
4820 retry: 4823 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
4821 new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); 4824 GFP_NOFS|__GFP_NOFAIL);
4822 if (!new_entry) {
4823 /*
4824 * We use a retry loop because
4825 * ext4_free_blocks() is not allowed to fail.
4826 */
4827 cond_resched();
4828 congestion_wait(BLK_RW_ASYNC, HZ/50);
4829 goto retry;
4830 }
4831 new_entry->efd_start_cluster = bit; 4825 new_entry->efd_start_cluster = bit;
4832 new_entry->efd_group = block_group; 4826 new_entry->efd_group = block_group;
4833 new_entry->efd_count = count_clusters; 4827 new_entry->efd_count = count_clusters;
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b52374e42102..6163ad21cb0e 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
620 struct ext4_inode_info *ei = EXT4_I(inode); 620 struct ext4_inode_info *ei = EXT4_I(inode);
621 struct ext4_extent *ex; 621 struct ext4_extent *ex;
622 unsigned int i, len; 622 unsigned int i, len;
623 ext4_lblk_t start, end;
623 ext4_fsblk_t blk; 624 ext4_fsblk_t blk;
624 handle_t *handle; 625 handle_t *handle;
625 int ret; 626 int ret;
@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
633 EXT4_FEATURE_RO_COMPAT_BIGALLOC)) 634 EXT4_FEATURE_RO_COMPAT_BIGALLOC))
634 return -EOPNOTSUPP; 635 return -EOPNOTSUPP;
635 636
637 /*
638 * In order to get correct extent info, force all delayed allocation
639 * blocks to be allocated, otherwise delayed allocation blocks may not
640 * be reflected and bypass the checks on extent header.
641 */
642 if (test_opt(inode->i_sb, DELALLOC))
643 ext4_alloc_da_blocks(inode);
644
636 handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); 645 handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
637 if (IS_ERR(handle)) 646 if (IS_ERR(handle))
638 return PTR_ERR(handle); 647 return PTR_ERR(handle);
@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
650 goto errout; 659 goto errout;
651 } 660 }
652 if (eh->eh_entries == 0) 661 if (eh->eh_entries == 0)
653 blk = len = 0; 662 blk = len = start = end = 0;
654 else { 663 else {
655 len = le16_to_cpu(ex->ee_len); 664 len = le16_to_cpu(ex->ee_len);
656 blk = ext4_ext_pblock(ex); 665 blk = ext4_ext_pblock(ex);
657 if (len > EXT4_NDIR_BLOCKS) { 666 start = le32_to_cpu(ex->ee_block);
667 end = start + len - 1;
668 if (end >= EXT4_NDIR_BLOCKS) {
658 ret = -EOPNOTSUPP; 669 ret = -EOPNOTSUPP;
659 goto errout; 670 goto errout;
660 } 671 }
@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
662 673
663 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); 674 ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
664 memset(ei->i_data, 0, sizeof(ei->i_data)); 675 memset(ei->i_data, 0, sizeof(ei->i_data));
665 for (i=0; i < len; i++) 676 for (i = start; i <= end; i++)
666 ei->i_data[i] = cpu_to_le32(blk++); 677 ei->i_data[i] = cpu_to_le32(blk++);
667 ext4_mark_inode_dirty(handle, inode); 678 ext4_mark_inode_dirty(handle, inode);
668errout: 679errout:
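The migrate.c loop fix is an indexing change: the direct-block slots to fill are determined by the extent's logical start, so an extent covering logical blocks [start, end] must land in i_data[start..end], and migration is refused once end reaches EXT4_NDIR_BLOCKS. A tiny model of that fill, with made-up values:

/* An extent of length 3 starting at logical block 2 maps physical
 * blocks blk, blk+1, blk+2 into slots 2..4, not 0..2. */
#include <stdio.h>

#define NDIR_BLOCKS 12

int main(void)
{
	unsigned int i_data[NDIR_BLOCKS] = { 0 };
	unsigned int start = 2, len = 3, blk = 1000;	/* assumed example extent */
	unsigned int end = start + len - 1;

	if (end >= NDIR_BLOCKS)
		return 1;				/* would need an indirect block */
	for (unsigned int i = start; i <= end; i++)
		i_data[i] = blk++;

	for (unsigned int i = 0; i < NDIR_BLOCKS; i++)
		printf("i_data[%u] = %u\n", i, i_data[i]);
	return 0;
}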
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..f71e19a9dd3c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
2072 return 1; 2072 return 1;
2073 } 2073 }
2074 2074
2075 mark_inode_dirty(inode);
2076
2077 if (!PageDirty(page)) { 2075 if (!PageDirty(page)) {
2078 __set_page_dirty_nobuffers(page); 2076 __set_page_dirty_nobuffers(page);
2079 update_dirty_page(inode, page); 2077 update_dirty_page(inode, page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ada2a3dd701a..b0f38c3b37f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
1331 if (ret) 1331 if (ret)
1332 return ret; 1332 return ret;
1333 1333
1334 if (f2fs_is_atomic_file(inode)) 1334 if (f2fs_is_atomic_file(inode)) {
1335 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1335 commit_inmem_pages(inode, false); 1336 commit_inmem_pages(inode, false);
1337 }
1336 1338
1337 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); 1339 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
1338 mnt_drop_write_file(filp); 1340 mnt_drop_write_file(filp);
1339 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1340 return ret; 1341 return ret;
1341} 1342}
1342 1343
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
1387 f2fs_balance_fs(F2FS_I_SB(inode)); 1388 f2fs_balance_fs(F2FS_I_SB(inode));
1388 1389
1389 if (f2fs_is_atomic_file(inode)) { 1390 if (f2fs_is_atomic_file(inode)) {
1390 commit_inmem_pages(inode, false);
1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); 1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1392 commit_inmem_pages(inode, false);
1392 } 1393 }
1393 1394
1394 if (f2fs_is_volatile_file(inode)) 1395 if (f2fs_is_volatile_file(inode))
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index e1e73617d13b..22fb5ef37966 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
556 if (!fio.encrypted_page) 556 if (!fio.encrypted_page)
557 goto put_out; 557 goto put_out;
558 558
559 f2fs_submit_page_bio(&fio); 559 err = f2fs_submit_page_bio(&fio);
560 if (err)
561 goto put_page_out;
562
563 /* write page */
564 lock_page(fio.encrypted_page);
565
566 if (unlikely(!PageUptodate(fio.encrypted_page)))
567 goto put_page_out;
568 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
569 goto put_page_out;
570
571 set_page_dirty(fio.encrypted_page);
572 f2fs_wait_on_page_writeback(fio.encrypted_page, META);
573 if (clear_page_dirty_for_io(fio.encrypted_page))
574 dec_page_count(fio.sbi, F2FS_DIRTY_META);
575
576 set_page_writeback(fio.encrypted_page);
560 577
561 /* allocate block address */ 578 /* allocate block address */
562 f2fs_wait_on_page_writeback(dn.node_page, NODE); 579 f2fs_wait_on_page_writeback(dn.node_page, NODE);
563
564 allocate_data_block(fio.sbi, NULL, fio.blk_addr, 580 allocate_data_block(fio.sbi, NULL, fio.blk_addr,
565 &fio.blk_addr, &sum, CURSEG_COLD_DATA); 581 &fio.blk_addr, &sum, CURSEG_COLD_DATA);
566 dn.data_blkaddr = fio.blk_addr;
567
568 /* write page */
569 lock_page(fio.encrypted_page);
570 set_page_writeback(fio.encrypted_page);
571 fio.rw = WRITE_SYNC; 582 fio.rw = WRITE_SYNC;
572 f2fs_submit_page_mbio(&fio); 583 f2fs_submit_page_mbio(&fio);
573 584
585 dn.data_blkaddr = fio.blk_addr;
574 set_data_blkaddr(&dn); 586 set_data_blkaddr(&dn);
575 f2fs_update_extent_cache(&dn); 587 f2fs_update_extent_cache(&dn);
576 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); 588 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
577 if (page->index == 0) 589 if (page->index == 0)
578 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); 590 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
579 591put_page_out:
580 f2fs_put_page(fio.encrypted_page, 1); 592 f2fs_put_page(fio.encrypted_page, 1);
581put_out: 593put_out:
582 f2fs_put_dnode(&dn); 594 f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
605 .page = page, 617 .page = page,
606 .encrypted_page = NULL, 618 .encrypted_page = NULL,
607 }; 619 };
620 set_page_dirty(page);
608 f2fs_wait_on_page_writeback(page, DATA); 621 f2fs_wait_on_page_writeback(page, DATA);
609
610 if (clear_page_dirty_for_io(page)) 622 if (clear_page_dirty_for_io(page))
611 inode_dec_dirty_pages(inode); 623 inode_dec_dirty_pages(inode);
612 set_cold_data(page); 624 set_cold_data(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 38e75fb1e488..a13ffcc32992 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
141 kunmap_atomic(dst_addr); 141 kunmap_atomic(dst_addr);
142 SetPageUptodate(page); 142 SetPageUptodate(page);
143no_update: 143no_update:
144 set_page_dirty(page);
145
144 /* clear dirty state */ 146 /* clear dirty state */
145 dirty = clear_page_dirty_for_io(page); 147 dirty = clear_page_dirty_for_io(page);
146 148
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..61b97f9cb9f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
257 if (!abort) { 257 if (!abort) {
258 lock_page(cur->page); 258 lock_page(cur->page);
259 if (cur->page->mapping == inode->i_mapping) { 259 if (cur->page->mapping == inode->i_mapping) {
260 set_page_dirty(cur->page);
260 f2fs_wait_on_page_writeback(cur->page, DATA); 261 f2fs_wait_on_page_writeback(cur->page, DATA);
261 if (clear_page_dirty_for_io(cur->page)) 262 if (clear_page_dirty_for_io(cur->page))
262 inode_dec_dirty_pages(inode); 263 inode_dec_dirty_pages(inode);
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
25#include <linux/hardirq.h> 25#include <linux/hardirq.h>
26#include <linux/task_work.h> 26#include <linux/task_work.h>
27#include <linux/ima.h> 27#include <linux/ima.h>
28#include <linux/swap.h>
28 29
29#include <linux/atomic.h> 30#include <linux/atomic.h>
30 31
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
308 } 309 }
309} 310}
310 311
311void __init files_init(unsigned long mempages) 312void __init files_init(void)
312{ 313{
313 unsigned long n;
314
315 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, 314 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
316 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 315 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
316 percpu_counter_init(&nr_files, 0, GFP_KERNEL);
317}
317 318
318 /* 319/*
319 * One file with associated inode and dcache is very roughly 1K. 320 * One file with associated inode and dcache is very roughly 1K. Per default
320 * Per default don't use more than 10% of our memory for files. 321 * do not use more than 10% of our memory for files.
321 */ 322 */
323void __init files_maxfiles_init(void)
324{
325 unsigned long n;
326 unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
327
328 memreserve = min(memreserve, totalram_pages - 1);
329 n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
322 330
323 n = (mempages * (PAGE_SIZE / 1024)) / 10;
324 files_stat.max_files = max_t(unsigned long, n, NR_FILE); 331 files_stat.max_files = max_t(unsigned long, n, NR_FILE);
325 percpu_counter_init(&nr_files, 0, GFP_KERNEL);
326} 332}
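files_maxfiles_init() now sizes max_files from memory not already consumed at boot: reserve min(1.5 x used pages, totalram - 1), then budget roughly 1 KiB per file out of no more than 10% of the remainder. A hedged back-of-the-envelope in plain C; the RAM and usage figures below are invented:

/* Rough model of files_maxfiles_init(); all numbers are example values. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long totalram_pages = 1048576;		/* assume 4 GiB of 4 KiB pages */
	unsigned long nr_free_pages = totalram_pages * 3 / 4;	/* assume 25% used */

	unsigned long memreserve = (totalram_pages - nr_free_pages) * 3 / 2;
	if (memreserve > totalram_pages - 1)
		memreserve = totalram_pages - 1;

	/* ~1K per file, from at most 10% of the non-reserved memory */
	unsigned long n = ((totalram_pages - memreserve) * (page_size / 1024)) / 10;
	printf("max_files ~= %lu\n", n);
	return 0;
}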
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f0520bcf2094..518c6294bf6c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
702 else 702 else
703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
704} 704}
705EXPORT_SYMBOL_GPL(wbc_account_io);
705 706
706/** 707/**
707 * inode_congested - test whether an inode is congested 708 * inode_congested - test whether an inode is congested
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b35d460..ebb5e37455a0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2246 2246
2247 err = -EINVAL; 2247 err = -EINVAL;
2248 if (old) { 2248 if (old) {
2249 struct fuse_dev *fud = fuse_get_dev(old); 2249 struct fuse_dev *fud = NULL;
2250
2251 /*
2252 * Check against file->f_op because CUSE
2253 * uses the same ioctl handler.
2254 */
2255 if (old->f_op == file->f_op &&
2256 old->f_cred->user_ns == file->f_cred->user_ns)
2257 fud = fuse_get_dev(old);
2250 2258
2251 if (fud) { 2259 if (fud) {
2252 mutex_lock(&fuse_mutex); 2260 mutex_lock(&fuse_mutex);
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index f005046e1591..d6a4b55d2ab0 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -484,3 +484,98 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
484 a->btree.first_free = cpu_to_le16(8); 484 a->btree.first_free = cpu_to_le16(8);
485 return a; 485 return a;
486} 486}
487
488static unsigned find_run(__le32 *bmp, unsigned *idx)
489{
490 unsigned len;
491 while (tstbits(bmp, *idx, 1)) {
492 (*idx)++;
493 if (unlikely(*idx >= 0x4000))
494 return 0;
495 }
496 len = 1;
497 while (!tstbits(bmp, *idx + len, 1))
498 len++;
499 return len;
500}
501
502static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result)
503{
504 int err;
505 secno end;
506 if (fatal_signal_pending(current))
507 return -EINTR;
508 end = start + len;
509 if (start < limit_start)
510 start = limit_start;
511 if (end > limit_end)
512 end = limit_end;
513 if (start >= end)
514 return 0;
515 if (end - start < minlen)
516 return 0;
517 err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0);
518 if (err)
519 return err;
520 *result += end - start;
521 return 0;
522}
523
524int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result)
525{
526 int err = 0;
527 struct hpfs_sb_info *sbi = hpfs_sb(s);
528 unsigned idx, len, start_bmp, end_bmp;
529 __le32 *bmp;
530 struct quad_buffer_head qbh;
531
532 *result = 0;
533 if (!end || end > sbi->sb_fs_size)
534 end = sbi->sb_fs_size;
535 if (start >= sbi->sb_fs_size)
536 return 0;
537 if (minlen > 0x4000)
538 return 0;
539 if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) {
540 hpfs_lock(s);
541 if (s->s_flags & MS_RDONLY) {
542 err = -EROFS;
543 goto unlock_1;
544 }
545 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
546 err = -EIO;
547 goto unlock_1;
548 }
549 idx = 0;
550 while ((len = find_run(bmp, &idx)) && !err) {
551 err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);
552 idx += len;
553 }
554 hpfs_brelse4(&qbh);
555unlock_1:
556 hpfs_unlock(s);
557 }
558 start_bmp = start >> 14;
559 end_bmp = (end + 0x3fff) >> 14;
560 while (start_bmp < end_bmp && !err) {
561 hpfs_lock(s);
562 if (s->s_flags & MS_RDONLY) {
563 err = -EROFS;
564 goto unlock_2;
565 }
566 if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) {
567 err = -EIO;
568 goto unlock_2;
569 }
570 idx = 0;
571 while ((len = find_run(bmp, &idx)) && !err) {
572 err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result);
573 idx += len;
574 }
575 hpfs_brelse4(&qbh);
576unlock_2:
577 hpfs_unlock(s);
578 start_bmp++;
579 }
580 return err;
581}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2a8e07425de0..dc540bfcee1d 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -327,4 +327,5 @@ const struct file_operations hpfs_dir_ops =
327 .iterate = hpfs_readdir, 327 .iterate = hpfs_readdir,
328 .release = hpfs_dir_release, 328 .release = hpfs_dir_release,
329 .fsync = hpfs_file_fsync, 329 .fsync = hpfs_file_fsync,
330 .unlocked_ioctl = hpfs_ioctl,
330}; 331};
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 6d8cfe9b52d6..7ca28d604bf7 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -203,6 +203,7 @@ const struct file_operations hpfs_file_ops =
203 .release = hpfs_file_release, 203 .release = hpfs_file_release,
204 .fsync = hpfs_file_fsync, 204 .fsync = hpfs_file_fsync,
205 .splice_read = generic_file_splice_read, 205 .splice_read = generic_file_splice_read,
206 .unlocked_ioctl = hpfs_ioctl,
206}; 207};
207 208
208const struct inode_operations hpfs_file_iops = 209const struct inode_operations hpfs_file_iops =
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index bb04b58d1d69..c4867b5116dd 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -18,6 +18,8 @@
18#include <linux/pagemap.h> 18#include <linux/pagemap.h>
19#include <linux/buffer_head.h> 19#include <linux/buffer_head.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/sched.h>
22#include <linux/blkdev.h>
21#include <asm/unaligned.h> 23#include <asm/unaligned.h>
22 24
23#include "hpfs.h" 25#include "hpfs.h"
@@ -200,6 +202,7 @@ void hpfs_free_dnode(struct super_block *, secno);
200struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *); 202struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
201struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **); 203struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
202struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **); 204struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
205int hpfs_trim_fs(struct super_block *, u64, u64, u64, unsigned *);
203 206
204/* anode.c */ 207/* anode.c */
205 208
@@ -318,6 +321,7 @@ __printf(2, 3)
318void hpfs_error(struct super_block *, const char *, ...); 321void hpfs_error(struct super_block *, const char *, ...);
319int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *); 322int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
320unsigned hpfs_get_free_dnodes(struct super_block *); 323unsigned hpfs_get_free_dnodes(struct super_block *);
324long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg);
321 325
322/* 326/*
323 * local time (HPFS) to GMT (Unix) 327 * local time (HPFS) to GMT (Unix)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 7cd00d3a7c9b..68a9bed05628 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
52} 52}
53 53
54/* Filesystem error... */ 54/* Filesystem error... */
55static char err_buf[1024];
56
57void hpfs_error(struct super_block *s, const char *fmt, ...) 55void hpfs_error(struct super_block *s, const char *fmt, ...)
58{ 56{
57 struct va_format vaf;
59 va_list args; 58 va_list args;
60 59
61 va_start(args, fmt); 60 va_start(args, fmt);
62 vsnprintf(err_buf, sizeof(err_buf), fmt, args); 61
62 vaf.fmt = fmt;
63 vaf.va = &args;
64
65 pr_err("filesystem error: %pV", &vaf);
66
63 va_end(args); 67 va_end(args);
64 68
65 pr_err("filesystem error: %s", err_buf);
66 if (!hpfs_sb(s)->sb_was_error) { 69 if (!hpfs_sb(s)->sb_was_error) {
67 if (hpfs_sb(s)->sb_err == 2) { 70 if (hpfs_sb(s)->sb_err == 2) {
68 pr_cont("; crashing the system because you wanted it\n"); 71 pr_cont("; crashing the system because you wanted it\n");
@@ -196,12 +199,39 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
196 return 0; 199 return 0;
197} 200}
198 201
202
203long hpfs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
204{
205 switch (cmd) {
206 case FITRIM: {
207 struct fstrim_range range;
208 secno n_trimmed;
209 int r;
210 if (!capable(CAP_SYS_ADMIN))
211 return -EPERM;
212 if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range)))
213 return -EFAULT;
214 r = hpfs_trim_fs(file_inode(file)->i_sb, range.start >> 9, (range.start + range.len) >> 9, (range.minlen + 511) >> 9, &n_trimmed);
215 if (r)
216 return r;
217 range.len = (u64)n_trimmed << 9;
218 if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range)))
219 return -EFAULT;
220 return 0;
221 }
222 default: {
223 return -ENOIOCTLCMD;
224 }
225 }
226}
227
228
199static struct kmem_cache * hpfs_inode_cachep; 229static struct kmem_cache * hpfs_inode_cachep;
200 230
201static struct inode *hpfs_alloc_inode(struct super_block *sb) 231static struct inode *hpfs_alloc_inode(struct super_block *sb)
202{ 232{
203 struct hpfs_inode_info *ei; 233 struct hpfs_inode_info *ei;
204 ei = (struct hpfs_inode_info *)kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS); 234 ei = kmem_cache_alloc(hpfs_inode_cachep, GFP_NOFS);
205 if (!ei) 235 if (!ei)
206 return NULL; 236 return NULL;
207 ei->vfs_inode.i_version = 1; 237 ei->vfs_inode.i_version = 1;
@@ -424,11 +454,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
424 int o; 454 int o;
425 struct hpfs_sb_info *sbi = hpfs_sb(s); 455 struct hpfs_sb_info *sbi = hpfs_sb(s);
426 char *new_opts = kstrdup(data, GFP_KERNEL); 456 char *new_opts = kstrdup(data, GFP_KERNEL);
427 457
458 if (!new_opts)
459 return -ENOMEM;
460
428 sync_filesystem(s); 461 sync_filesystem(s);
429 462
430 *flags |= MS_NOATIME; 463 *flags |= MS_NOATIME;
431 464
432 hpfs_lock(s); 465 hpfs_lock(s);
433 uid = sbi->sb_uid; gid = sbi->sb_gid; 466 uid = sbi->sb_uid; gid = sbi->sb_gid;
434 umask = 0777 & ~sbi->sb_mode; 467 umask = 0777 & ~sbi->sb_mode;
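With hpfs_ioctl() wired into both hpfs_dir_ops and hpfs_file_ops above, an HPFS mount now answers FITRIM like other filesystems. A minimal fstrim-style caller might look like the sketch below; the mount point path is a placeholder and CAP_SYS_ADMIN is required.

/* Minimal FITRIM request against a mounted filesystem. */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = (__u64)-1,	/* whole filesystem */
		.minlen = 0,
	};
	int fd = open("/mnt/hpfs", O_RDONLY);	/* placeholder mount point */

	if (fd < 0) { perror("open"); return 1; }
	if (ioctl(fd, FITRIM, &range)) { perror("FITRIM"); close(fd); return 1; }
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}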
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
1010 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); 1010 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
1011 if (!inode) 1011 if (!inode)
1012 goto out_dentry; 1012 goto out_dentry;
1013 if (creat_flags == HUGETLB_SHMFS_INODE)
1014 inode->i_flags |= S_PRIVATE;
1013 1015
1014 file = ERR_PTR(-ENOMEM); 1016 file = ERR_PTR(-ENOMEM);
1015 if (hugetlb_reserve_pages(inode, 0, 1017 if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index e98d39d75cf4..b9dc23cd04f2 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -76,7 +76,7 @@ static int jfs_open(struct inode *inode, struct file *file)
76 if (ji->active_ag == -1) { 76 if (ji->active_ag == -1) {
77 struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); 77 struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
78 ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); 78 ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
79 atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]); 79 atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
80 } 80 }
81 spin_unlock_irq(&ji->ag_lock); 81 spin_unlock_irq(&ji->ag_lock);
82 } 82 }
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 6f1cb2b5ee28..41aa3ca6a6a4 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -134,11 +134,11 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
134 * It has been committed since the last change, but was still 134 * It has been committed since the last change, but was still
135 * on the dirty inode list. 135 * on the dirty inode list.
136 */ 136 */
137 if (!test_cflag(COMMIT_Dirty, inode)) { 137 if (!test_cflag(COMMIT_Dirty, inode)) {
138 /* Make sure committed changes hit the disk */ 138 /* Make sure committed changes hit the disk */
139 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait); 139 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
140 return 0; 140 return 0;
141 } 141 }
142 142
143 if (jfs_commit_inode(inode, wait)) { 143 if (jfs_commit_inode(inode, wait)) {
144 jfs_err("jfs_write_inode: jfs_commit_inode failed!"); 144 jfs_err("jfs_write_inode: jfs_commit_inode failed!");
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index 93a1232894f6..8db8b7d61e40 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -180,9 +180,6 @@ long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
180 case JFS_IOC_SETFLAGS32: 180 case JFS_IOC_SETFLAGS32:
181 cmd = JFS_IOC_SETFLAGS; 181 cmd = JFS_IOC_SETFLAGS;
182 break; 182 break;
183 case FITRIM:
184 cmd = FITRIM;
185 break;
186 } 183 }
187 return jfs_ioctl(filp, cmd, arg); 184 return jfs_ioctl(filp, cmd, arg);
188} 185}
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index e33be921aa41..a5ac97b9a933 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1160,7 +1160,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1160 rc = dtModify(tid, new_dir, &new_dname, &ino, 1160 rc = dtModify(tid, new_dir, &new_dname, &ino,
1161 old_ip->i_ino, JFS_RENAME); 1161 old_ip->i_ino, JFS_RENAME);
1162 if (rc) 1162 if (rc)
1163 goto out4; 1163 goto out_tx;
1164 drop_nlink(new_ip); 1164 drop_nlink(new_ip);
1165 if (S_ISDIR(new_ip->i_mode)) { 1165 if (S_ISDIR(new_ip->i_mode)) {
1166 drop_nlink(new_ip); 1166 drop_nlink(new_ip);
@@ -1185,7 +1185,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1185 if ((new_size = commitZeroLink(tid, new_ip)) < 0) { 1185 if ((new_size = commitZeroLink(tid, new_ip)) < 0) {
1186 txAbort(tid, 1); /* Marks FS Dirty */ 1186 txAbort(tid, 1); /* Marks FS Dirty */
1187 rc = new_size; 1187 rc = new_size;
1188 goto out4; 1188 goto out_tx;
1189 } 1189 }
1190 tblk = tid_to_tblock(tid); 1190 tblk = tid_to_tblock(tid);
1191 tblk->xflag |= COMMIT_DELETE; 1191 tblk->xflag |= COMMIT_DELETE;
@@ -1203,7 +1203,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1203 if (rc) { 1203 if (rc) {
1204 jfs_err("jfs_rename didn't expect dtSearch to fail " 1204 jfs_err("jfs_rename didn't expect dtSearch to fail "
1205 "w/rc = %d", rc); 1205 "w/rc = %d", rc);
1206 goto out4; 1206 goto out_tx;
1207 } 1207 }
1208 1208
1209 ino = old_ip->i_ino; 1209 ino = old_ip->i_ino;
@@ -1211,7 +1211,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1211 if (rc) { 1211 if (rc) {
1212 if (rc == -EIO) 1212 if (rc == -EIO)
1213 jfs_err("jfs_rename: dtInsert returned -EIO"); 1213 jfs_err("jfs_rename: dtInsert returned -EIO");
1214 goto out4; 1214 goto out_tx;
1215 } 1215 }
1216 if (S_ISDIR(old_ip->i_mode)) 1216 if (S_ISDIR(old_ip->i_mode))
1217 inc_nlink(new_dir); 1217 inc_nlink(new_dir);
@@ -1226,7 +1226,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1226 jfs_err("jfs_rename did not expect dtDelete to return rc = %d", 1226 jfs_err("jfs_rename did not expect dtDelete to return rc = %d",
1227 rc); 1227 rc);
1228 txAbort(tid, 1); /* Marks Filesystem dirty */ 1228 txAbort(tid, 1); /* Marks Filesystem dirty */
1229 goto out4; 1229 goto out_tx;
1230 } 1230 }
1231 if (S_ISDIR(old_ip->i_mode)) { 1231 if (S_ISDIR(old_ip->i_mode)) {
1232 drop_nlink(old_dir); 1232 drop_nlink(old_dir);
@@ -1285,7 +1285,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1285 1285
1286 rc = txCommit(tid, ipcount, iplist, commit_flag); 1286 rc = txCommit(tid, ipcount, iplist, commit_flag);
1287 1287
1288 out4: 1288 out_tx:
1289 txEnd(tid); 1289 txEnd(tid);
1290 if (new_ip) 1290 if (new_ip)
1291 mutex_unlock(&JFS_IP(new_ip)->commit_mutex); 1291 mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
@@ -1308,13 +1308,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1308 } 1308 }
1309 if (new_ip && (new_ip->i_nlink == 0)) 1309 if (new_ip && (new_ip->i_nlink == 0))
1310 set_cflag(COMMIT_Nolink, new_ip); 1310 set_cflag(COMMIT_Nolink, new_ip);
1311 out3:
1312 free_UCSname(&new_dname);
1313 out2:
1314 free_UCSname(&old_dname);
1315 out1:
1316 if (new_ip && !S_ISDIR(new_ip->i_mode))
1317 IWRITE_UNLOCK(new_ip);
1318 /* 1311 /*
1319 * Truncating the directory index table is not guaranteed. It 1312 * Truncating the directory index table is not guaranteed. It
1320 * may need to be done iteratively 1313 * may need to be done iteratively
@@ -1325,7 +1318,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1325 1318
1326 clear_cflag(COMMIT_Stale, old_dir); 1319 clear_cflag(COMMIT_Stale, old_dir);
1327 } 1320 }
1328 1321 if (new_ip && !S_ISDIR(new_ip->i_mode))
1322 IWRITE_UNLOCK(new_ip);
1323 out3:
1324 free_UCSname(&new_dname);
1325 out2:
1326 free_UCSname(&old_dname);
1327 out1:
1329 jfs_info("jfs_rename: returning %d", rc); 1328 jfs_info("jfs_rename: returning %d", rc);
1330 return rc; 1329 return rc;
1331} 1330}
diff --git a/fs/locks.c b/fs/locks.c
index 653faabb07f4..d3d558ba4da7 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
862 * whether or not a lock was successfully freed by testing the return 862 * whether or not a lock was successfully freed by testing the return
863 * value for -ENOENT. 863 * value for -ENOENT.
864 */ 864 */
865static int flock_lock_file(struct file *filp, struct file_lock *request) 865static int flock_lock_inode(struct inode *inode, struct file_lock *request)
866{ 866{
867 struct file_lock *new_fl = NULL; 867 struct file_lock *new_fl = NULL;
868 struct file_lock *fl; 868 struct file_lock *fl;
869 struct file_lock_context *ctx; 869 struct file_lock_context *ctx;
870 struct inode *inode = file_inode(filp);
871 int error = 0; 870 int error = 0;
872 bool found = false; 871 bool found = false;
873 LIST_HEAD(dispose); 872 LIST_HEAD(dispose);
@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
890 goto find_conflict; 889 goto find_conflict;
891 890
892 list_for_each_entry(fl, &ctx->flc_flock, fl_list) { 891 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
893 if (filp != fl->fl_file) 892 if (request->fl_file != fl->fl_file)
894 continue; 893 continue;
895 if (request->fl_type == fl->fl_type) 894 if (request->fl_type == fl->fl_type)
896 goto out; 895 goto out;
@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
1164EXPORT_SYMBOL(posix_lock_file); 1163EXPORT_SYMBOL(posix_lock_file);
1165 1164
1166/** 1165/**
1167 * posix_lock_file_wait - Apply a POSIX-style lock to a file 1166 * posix_lock_inode_wait - Apply a POSIX-style lock to a file
1168 * @filp: The file to apply the lock to 1167 * @inode: inode of file to which lock request should be applied
1169 * @fl: The lock to be applied 1168 * @fl: The lock to be applied
1170 * 1169 *
1171 * Add a POSIX style lock to a file. 1170 * Variant of posix_lock_file_wait that does not take a filp, and so can be
1172 * We merge adjacent & overlapping locks whenever possible. 1171 * used after the filp has already been torn down.
1173 * POSIX locks are sorted by owner task, then by starting address
1174 */ 1172 */
1175int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 1173int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1176{ 1174{
1177 int error; 1175 int error;
1178 might_sleep (); 1176 might_sleep ();
1179 for (;;) { 1177 for (;;) {
1180 error = posix_lock_file(filp, fl, NULL); 1178 error = __posix_lock_file(inode, fl, NULL);
1181 if (error != FILE_LOCK_DEFERRED) 1179 if (error != FILE_LOCK_DEFERRED)
1182 break; 1180 break;
1183 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1181 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1189 } 1187 }
1190 return error; 1188 return error;
1191} 1189}
1192EXPORT_SYMBOL(posix_lock_file_wait); 1190EXPORT_SYMBOL(posix_lock_inode_wait);
1193 1191
1194/** 1192/**
1195 * locks_mandatory_locked - Check for an active lock 1193 * locks_mandatory_locked - Check for an active lock
@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1851} 1849}
1852 1850
1853/** 1851/**
1854 * flock_lock_file_wait - Apply a FLOCK-style lock to a file 1852 * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
1855 * @filp: The file to apply the lock to 1853 * @inode: inode of the file to apply to
1856 * @fl: The lock to be applied 1854 * @fl: The lock to be applied
1857 * 1855 *
1858 * Add a FLOCK style lock to a file. 1856 * Apply a FLOCK style lock request to an inode.
1859 */ 1857 */
1860int flock_lock_file_wait(struct file *filp, struct file_lock *fl) 1858int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1861{ 1859{
1862 int error; 1860 int error;
1863 might_sleep(); 1861 might_sleep();
1864 for (;;) { 1862 for (;;) {
1865 error = flock_lock_file(filp, fl); 1863 error = flock_lock_inode(inode, fl);
1866 if (error != FILE_LOCK_DEFERRED) 1864 if (error != FILE_LOCK_DEFERRED)
1867 break; 1865 break;
1868 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); 1866 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1874 } 1872 }
1875 return error; 1873 return error;
1876} 1874}
1877 1875EXPORT_SYMBOL(flock_lock_inode_wait);
1878EXPORT_SYMBOL(flock_lock_file_wait);
1879 1876
1880/** 1877/**
1881 * sys_flock: - flock() system call. 1878 * sys_flock: - flock() system call.
@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
2401 .fl_type = F_UNLCK, 2398 .fl_type = F_UNLCK,
2402 .fl_end = OFFSET_MAX, 2399 .fl_end = OFFSET_MAX,
2403 }; 2400 };
2404 struct file_lock_context *flctx = file_inode(filp)->i_flctx; 2401 struct inode *inode = file_inode(filp);
2402 struct file_lock_context *flctx = inode->i_flctx;
2405 2403
2406 if (list_empty(&flctx->flc_flock)) 2404 if (list_empty(&flctx->flc_flock))
2407 return; 2405 return;
@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
2409 if (filp->f_op->flock) 2407 if (filp->f_op->flock)
2410 filp->f_op->flock(filp, F_SETLKW, &fl); 2408 filp->f_op->flock(filp, F_SETLKW, &fl);
2411 else 2409 else
2412 flock_lock_file(filp, &fl); 2410 flock_lock_inode(inode, &fl);
2413 2411
2414 if (fl.fl_ops && fl.fl_ops->fl_release_private) 2412 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2415 fl.fl_ops->fl_release_private(&fl); 2413 fl.fl_ops->fl_release_private(&fl);
diff --git a/fs/namei.c b/fs/namei.c
index ae4e4c18b2ac..1c2105ed20c5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
879 return 0; 879 return 0;
880 880
881 /* Allowed if parent directory not sticky and world-writable. */ 881 /* Allowed if parent directory not sticky and world-writable. */
882 parent = nd->path.dentry->d_inode; 882 parent = nd->inode;
883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) 883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
884 return 0; 884 return 0;
885 885
@@ -1954,8 +1954,13 @@ OK:
1954 continue; 1954 continue;
1955 } 1955 }
1956 } 1956 }
1957 if (unlikely(!d_can_lookup(nd->path.dentry))) 1957 if (unlikely(!d_can_lookup(nd->path.dentry))) {
1958 if (nd->flags & LOOKUP_RCU) {
1959 if (unlazy_walk(nd, NULL, 0))
1960 return -ECHILD;
1961 }
1958 return -ENOTDIR; 1962 return -ENOTDIR;
1963 }
1959 } 1964 }
1960} 1965}
1961 1966
diff --git a/fs/namespace.c b/fs/namespace.c
index c7cb8a526c05..2b8aa15fd6df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
1361 UMOUNT_PROPAGATE = 2, 1361 UMOUNT_PROPAGATE = 2,
1362 UMOUNT_CONNECTED = 4, 1362 UMOUNT_CONNECTED = 4,
1363}; 1363};
1364
1365static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1366{
1367 /* Leaving mounts connected is only valid for lazy umounts */
1368 if (how & UMOUNT_SYNC)
1369 return true;
1370
1371 /* A mount without a parent has nothing to be connected to */
1372 if (!mnt_has_parent(mnt))
1373 return true;
1374
1375 /* Because the reference counting rules change when mounts are
1376 * unmounted and connected, umounted mounts may not be
1377 * connected to mounted mounts.
1378 */
1379 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1380 return true;
1381
1382 /* Has it been requested that the mount remain connected? */
1383 if (how & UMOUNT_CONNECTED)
1384 return false;
1385
1386 /* Is the mount locked such that it needs to remain connected? */
1387 if (IS_MNT_LOCKED(mnt))
1388 return false;
1389
1390 /* By default disconnect the mount */
1391 return true;
1392}
1393
1364/* 1394/*
1365 * mount_lock must be held 1395 * mount_lock must be held
1366 * namespace_sem must be held for write 1396 * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1398 if (how & UMOUNT_SYNC) 1428 if (how & UMOUNT_SYNC)
1399 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1429 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1400 1430
1401 disconnect = !(((how & UMOUNT_CONNECTED) && 1431 disconnect = disconnect_mount(p, how);
1402 mnt_has_parent(p) &&
1403 (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
1404 IS_MNT_LOCKED_AND_LAZY(p));
1405 1432
1406 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, 1433 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
1407 disconnect ? &unmounted : NULL); 1434 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
1538 while (!hlist_empty(&mp->m_list)) { 1565 while (!hlist_empty(&mp->m_list)) {
1539 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1566 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1540 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1567 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1541 struct mount *p, *tmp; 1568 hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
1542 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1569 umount_mnt(mnt);
1543 hlist_add_head(&p->mnt_umount.s_list, &unmounted);
1544 umount_mnt(p);
1545 }
1546 } 1570 }
1547 else umount_tree(mnt, UMOUNT_CONNECTED); 1571 else umount_tree(mnt, UMOUNT_CONNECTED);
1548 } 1572 }
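The fs/namespace.c change above replaces a dense compound boolean in umount_tree() with the named predicate disconnect_mount(), built from early returns with one comment per rule. Below is a hedged userspace sketch of that early-return predicate style; the struct fields and flag values are stand-ins, not the kernel's mount structures.

#include <stdbool.h>

enum umount_flags { UMOUNT_SYNC = 1, UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4 };

struct mnt_sketch {
    struct mnt_sketch *parent;  /* NULL when there is no parent          */
    bool parent_umounted;       /* parent already marked for unmounting  */
    bool locked;                /* mount is locked to its parent         */
};

bool should_disconnect(const struct mnt_sketch *m, int how)
{
    /* Leaving mounts connected only makes sense for lazy umounts. */
    if (how & UMOUNT_SYNC)
        return true;

    /* Nothing to stay connected to without a parent. */
    if (!m->parent)
        return true;

    /* An unmounted mount may not hang off a still-mounted parent. */
    if (!m->parent_umounted)
        return true;

    /* The caller asked for the subtree to stay connected. */
    if (how & UMOUNT_CONNECTED)
        return false;

    /* Locked mounts must remain visible to their parent. */
    if (m->locked)
        return false;

    /* Default: disconnect. */
    return true;
}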
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ecebb406cc1a..4a90c9bb3135 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
775 server->options = data->options; 775 server->options = data->options;
776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| 777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR; 778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
779 779
780 if (data->rsize) 780 if (data->rsize)
781 server->rsize = nfs_block_size(data->rsize, NULL); 781 server->rsize = nfs_block_size(data->rsize, NULL);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c12951b9551e..b3289d701eea 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
1852 struct nfs42_layoutstat_devinfo *devinfo; 1852 struct nfs42_layoutstat_devinfo *devinfo;
1853 int i; 1853 int i;
1854 1854
1855 for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { 1855 for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
1856 if (*dev_count >= dev_limit) 1856 if (*dev_count >= dev_limit)
1857 break; 1857 break;
1858 mirror = FF_LAYOUT_COMP(pls, i); 1858 mirror = FF_LAYOUT_COMP(pls, i);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b77b328a06d7..0adc7d245b3d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
444 inode->i_version = fattr->change_attr; 444 inode->i_version = fattr->change_attr;
445 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 445 else
446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
447 | NFS_INO_REVAL_PAGECACHE);
447 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 448 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
448 inode->i_size = nfs_size_to_loff_t(fattr->size); 449 inode->i_size = nfs_size_to_loff_t(fattr->size);
449 else 450 else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1244 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { 1245 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
1245 cur_size = i_size_read(inode); 1246 cur_size = i_size_read(inode);
1246 new_isize = nfs_size_to_loff_t(fattr->size); 1247 new_isize = nfs_size_to_loff_t(fattr->size);
1247 if (cur_size != new_isize && nfsi->nrequests == 0) 1248 if (cur_size != new_isize)
1248 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1249 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
1249 } 1250 }
1251 if (nfsi->nrequests != 0)
1252 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1250 1253
1251 /* Have any file permissions changed? */ 1254 /* Have any file permissions changed? */
1252 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) 1255 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1684 invalid |= NFS_INO_INVALID_ATTR 1687 invalid |= NFS_INO_INVALID_ATTR
1685 | NFS_INO_INVALID_DATA 1688 | NFS_INO_INVALID_DATA
1686 | NFS_INO_INVALID_ACCESS 1689 | NFS_INO_INVALID_ACCESS
1687 | NFS_INO_INVALID_ACL 1690 | NFS_INO_INVALID_ACL;
1688 | NFS_INO_REVAL_PAGECACHE;
1689 if (S_ISDIR(inode->i_mode)) 1691 if (S_ISDIR(inode->i_mode))
1690 nfs_force_lookup_revalidate(inode); 1692 nfs_force_lookup_revalidate(inode);
1691 inode->i_version = fattr->change_attr; 1693 inode->i_version = fattr->change_attr;
1692 } 1694 }
1693 } else if (server->caps & NFS_CAP_CHANGE_ATTR) 1695 } else
1694 nfsi->cache_validity |= save_cache_validity; 1696 nfsi->cache_validity |= save_cache_validity;
1695 1697
1696 if (fattr->valid & NFS_ATTR_FATTR_MTIME) { 1698 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1717 if ((nfsi->nrequests == 0) || new_isize > cur_isize) { 1719 if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
1718 i_size_write(inode, new_isize); 1720 i_size_write(inode, new_isize);
1719 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1721 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1720 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1721 } 1722 }
1722 dprintk("NFS: isize change on server for file %s/%ld " 1723 dprintk("NFS: isize change on server for file %s/%ld "
1723 "(%Ld to %Ld)\n", 1724 "(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7e3c4604bea8..9b372b845f6a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
296 296
297#ifdef CONFIG_NFS_V4_SECURITY_LABEL 297#ifdef CONFIG_NFS_V4_SECURITY_LABEL
298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); 298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
299static inline struct nfs4_label *
300nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
301{
302 if (!dst || !src)
303 return NULL;
304
305 if (src->len > NFS4_MAXLABELLEN)
306 return NULL;
307
308 dst->lfs = src->lfs;
309 dst->pi = src->pi;
310 dst->len = src->len;
311 memcpy(dst->label, src->label, src->len);
312
313 return dst;
314}
299static inline void nfs4_label_free(struct nfs4_label *label) 315static inline void nfs4_label_free(struct nfs4_label *label)
300{ 316{
301 if (label) { 317 if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
316static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 332static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
317{ 333{
318} 334}
335static inline struct nfs4_label *
336nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
337{
338 return NULL;
339}
319#endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 340#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
320 341
321/* proc.c */ 342/* proc.c */
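The new nfs4_label_copy() inline above is a bounded copy: it rejects NULL arguments and any source longer than NFS4_MAXLABELLEN before copying into the fixed-size destination, with a stub returning NULL for builds without CONFIG_NFS_V4_SECURITY_LABEL. Here is a small self-contained sketch of that bounds-checked copy; LABEL_MAXLEN and the struct layout are illustrative, not the NFS definitions.

#include <stddef.h>
#include <string.h>

#define LABEL_MAXLEN 4096       /* stand-in for NFS4_MAXLABELLEN */

struct label {
    unsigned int len;
    char data[LABEL_MAXLEN];
};

struct label *label_copy(struct label *dst, const struct label *src)
{
    if (!dst || !src)
        return NULL;

    if (src->len > LABEL_MAXLEN)        /* refuse anything that cannot fit */
        return NULL;

    dst->len = src->len;
    memcpy(dst->data, src->data, src->len);
    return dst;
}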
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f486b80f927a..d731bbf974aa 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 return err; 135 return err;
136} 136}
137 137
138loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) 138static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
139{ 139{
140 struct inode *inode = file_inode(filep); 140 struct inode *inode = file_inode(filep);
141 struct nfs42_seek_args args = { 141 struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); 171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
172} 172}
173 173
174loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { };
178 int err;
179
180 do {
181 err = _nfs42_proc_llseek(filep, offset, whence);
182 if (err == -ENOTSUPP)
183 return -EOPNOTSUPP;
184 err = nfs4_handle_exception(server, err, &exception);
185 } while (exception.retry);
186
187 return err;
188}
189
190
174static void 191static void
175nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) 192nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
176{ 193{
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6f228b5af819..3acb1eb72930 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
467 467
468static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 468static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469{ 469{
470 do_renew_lease(server->nfs_client, timestamp); 470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
471} 474}
472 475
473struct nfs4_call_sync_data { 476struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
616 clp = session->clp; 619 clp = session->clp;
617 do_renew_lease(clp, res->sr_timestamp); 620 do_renew_lease(clp, res->sr_timestamp);
618 /* Check sequence flags */ 621 /* Check sequence flags */
619 if (res->sr_status_flags != 0) 622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
620 nfs4_schedule_lease_recovery(clp);
621 nfs41_update_target_slotid(slot->table, slot, res); 623 nfs41_update_target_slotid(slot->table, slot, res);
622 break; 624 break;
623 case 1: 625 case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
910 struct nfs_open_confirmres c_res; 912 struct nfs_open_confirmres c_res;
911 struct nfs4_string owner_name; 913 struct nfs4_string owner_name;
912 struct nfs4_string group_name; 914 struct nfs4_string group_name;
915 struct nfs4_label *a_label;
913 struct nfs_fattr f_attr; 916 struct nfs_fattr f_attr;
914 struct nfs4_label *f_label; 917 struct nfs4_label *f_label;
915 struct dentry *dir; 918 struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1013 if (IS_ERR(p->f_label)) 1016 if (IS_ERR(p->f_label))
1014 goto err_free_p; 1017 goto err_free_p;
1015 1018
1019 p->a_label = nfs4_label_alloc(server, gfp_mask);
1020 if (IS_ERR(p->a_label))
1021 goto err_free_f;
1022
1016 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1023 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1017 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1024 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1018 if (IS_ERR(p->o_arg.seqid)) 1025 if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1041 p->o_arg.server = server; 1048 p->o_arg.server = server;
1042 p->o_arg.bitmask = nfs4_bitmask(server, label); 1049 p->o_arg.bitmask = nfs4_bitmask(server, label);
1043 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1050 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1044 p->o_arg.label = label; 1051 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1045 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1052 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1046 switch (p->o_arg.claim) { 1053 switch (p->o_arg.claim) {
1047 case NFS4_OPEN_CLAIM_NULL: 1054 case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1074 return p; 1081 return p;
1075 1082
1076err_free_label: 1083err_free_label:
1084 nfs4_label_free(p->a_label);
1085err_free_f:
1077 nfs4_label_free(p->f_label); 1086 nfs4_label_free(p->f_label);
1078err_free_p: 1087err_free_p:
1079 kfree(p); 1088 kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
1093 nfs4_put_open_state(p->state); 1102 nfs4_put_open_state(p->state);
1094 nfs4_put_state_owner(p->owner); 1103 nfs4_put_state_owner(p->owner);
1095 1104
1105 nfs4_label_free(p->a_label);
1096 nfs4_label_free(p->f_label); 1106 nfs4_label_free(p->f_label);
1097 1107
1098 dput(p->dir); 1108 dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1198 1208
1199static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1209static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1200{ 1210{
1211 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1212 return;
1201 if (state->n_wronly) 1213 if (state->n_wronly)
1202 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1214 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1203 if (state->n_rdonly) 1215 if (state->n_rdonly)
1204 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1216 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1205 if (state->n_rdwr) 1217 if (state->n_rdwr)
1206 set_bit(NFS_O_RDWR_STATE, &state->flags); 1218 set_bit(NFS_O_RDWR_STATE, &state->flags);
1219 set_bit(NFS_OPEN_STATE, &state->flags);
1207} 1220}
1208 1221
1209static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1222static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -5439,15 +5452,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
5439 return err; 5452 return err;
5440} 5453}
5441 5454
5442static int do_vfs_lock(struct file *file, struct file_lock *fl) 5455static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5443{ 5456{
5444 int res = 0; 5457 int res = 0;
5445 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 5458 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
5446 case FL_POSIX: 5459 case FL_POSIX:
5447 res = posix_lock_file_wait(file, fl); 5460 res = posix_lock_inode_wait(inode, fl);
5448 break; 5461 break;
5449 case FL_FLOCK: 5462 case FL_FLOCK:
5450 res = flock_lock_file_wait(file, fl); 5463 res = flock_lock_inode_wait(inode, fl);
5451 break; 5464 break;
5452 default: 5465 default:
5453 BUG(); 5466 BUG();
@@ -5484,7 +5497,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5484 atomic_inc(&lsp->ls_count); 5497 atomic_inc(&lsp->ls_count);
5485 /* Ensure we don't close file until we're done freeing locks! */ 5498 /* Ensure we don't close file until we're done freeing locks! */
5486 p->ctx = get_nfs_open_context(ctx); 5499 p->ctx = get_nfs_open_context(ctx);
5487 get_file(fl->fl_file);
5488 memcpy(&p->fl, fl, sizeof(p->fl)); 5500 memcpy(&p->fl, fl, sizeof(p->fl));
5489 p->server = NFS_SERVER(inode); 5501 p->server = NFS_SERVER(inode);
5490 return p; 5502 return p;
@@ -5496,7 +5508,6 @@ static void nfs4_locku_release_calldata(void *data)
5496 nfs_free_seqid(calldata->arg.seqid); 5508 nfs_free_seqid(calldata->arg.seqid);
5497 nfs4_put_lock_state(calldata->lsp); 5509 nfs4_put_lock_state(calldata->lsp);
5498 put_nfs_open_context(calldata->ctx); 5510 put_nfs_open_context(calldata->ctx);
5499 fput(calldata->fl.fl_file);
5500 kfree(calldata); 5511 kfree(calldata);
5501} 5512}
5502 5513
@@ -5509,7 +5520,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
5509 switch (task->tk_status) { 5520 switch (task->tk_status) {
5510 case 0: 5521 case 0:
5511 renew_lease(calldata->server, calldata->timestamp); 5522 renew_lease(calldata->server, calldata->timestamp);
5512 do_vfs_lock(calldata->fl.fl_file, &calldata->fl); 5523 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5513 if (nfs4_update_lock_stateid(calldata->lsp, 5524 if (nfs4_update_lock_stateid(calldata->lsp,
5514 &calldata->res.stateid)) 5525 &calldata->res.stateid))
5515 break; 5526 break;
@@ -5617,7 +5628,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
5617 mutex_lock(&sp->so_delegreturn_mutex); 5628 mutex_lock(&sp->so_delegreturn_mutex);
5618 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5629 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5619 down_read(&nfsi->rwsem); 5630 down_read(&nfsi->rwsem);
5620 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 5631 if (do_vfs_lock(inode, request) == -ENOENT) {
5621 up_read(&nfsi->rwsem); 5632 up_read(&nfsi->rwsem);
5622 mutex_unlock(&sp->so_delegreturn_mutex); 5633 mutex_unlock(&sp->so_delegreturn_mutex);
5623 goto out; 5634 goto out;
@@ -5758,7 +5769,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5758 data->timestamp); 5769 data->timestamp);
5759 if (data->arg.new_lock) { 5770 if (data->arg.new_lock) {
5760 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5771 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5761 if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) { 5772 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5762 rpc_restart_call_prepare(task); 5773 rpc_restart_call_prepare(task);
5763 break; 5774 break;
5764 } 5775 }
@@ -6000,7 +6011,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
6000 if (status != 0) 6011 if (status != 0)
6001 goto out; 6012 goto out;
6002 request->fl_flags |= FL_ACCESS; 6013 request->fl_flags |= FL_ACCESS;
6003 status = do_vfs_lock(request->fl_file, request); 6014 status = do_vfs_lock(state->inode, request);
6004 if (status < 0) 6015 if (status < 0)
6005 goto out; 6016 goto out;
6006 down_read(&nfsi->rwsem); 6017 down_read(&nfsi->rwsem);
@@ -6008,7 +6019,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
6008 /* Yes: cache locks! */ 6019 /* Yes: cache locks! */
6009 /* ...but avoid races with delegation recall... */ 6020 /* ...but avoid races with delegation recall... */
6010 request->fl_flags = fl_flags & ~FL_SLEEP; 6021 request->fl_flags = fl_flags & ~FL_SLEEP;
6011 status = do_vfs_lock(request->fl_file, request); 6022 status = do_vfs_lock(state->inode, request);
6012 up_read(&nfsi->rwsem); 6023 up_read(&nfsi->rwsem);
6013 goto out; 6024 goto out;
6014 } 6025 }
@@ -7573,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7573 goto out; 7584 goto out;
7574 } 7585 }
7575 ret = rpc_wait_for_completion_task(task); 7586 ret = rpc_wait_for_completion_task(task);
7576 if (!ret) { 7587 if (!ret)
7577 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7578
7579 if (task->tk_status == 0)
7580 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7581 ret = task->tk_status; 7588 ret = task->tk_status;
7582 }
7583 rpc_put_task(task); 7589 rpc_put_task(task);
7584out: 7590out:
7585 dprintk("<-- %s status=%d\n", __func__, ret); 7591 dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7967,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
7967{ 7973{
7968 struct nfs4_layoutreturn *lrp = calldata; 7974 struct nfs4_layoutreturn *lrp = calldata;
7969 struct pnfs_layout_hdr *lo = lrp->args.layout; 7975 struct pnfs_layout_hdr *lo = lrp->args.layout;
7976 LIST_HEAD(freeme);
7970 7977
7971 dprintk("--> %s\n", __func__); 7978 dprintk("--> %s\n", __func__);
7972 spin_lock(&lo->plh_inode->i_lock); 7979 spin_lock(&lo->plh_inode->i_lock);
7973 if (lrp->res.lrs_present) 7980 if (lrp->res.lrs_present)
7974 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 7981 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7982 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
7975 pnfs_clear_layoutreturn_waitbit(lo); 7983 pnfs_clear_layoutreturn_waitbit(lo);
7976 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7977 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7978 lo->plh_block_lgets--; 7984 lo->plh_block_lgets--;
7979 spin_unlock(&lo->plh_inode->i_lock); 7985 spin_unlock(&lo->plh_inode->i_lock);
7986 pnfs_free_lseg_list(&freeme);
7980 pnfs_put_layout_hdr(lrp->args.layout); 7987 pnfs_put_layout_hdr(lrp->args.layout);
7981 nfs_iput_and_deactive(lrp->inode); 7988 nfs_iput_and_deactive(lrp->inode);
7982 kfree(calldata); 7989 kfree(calldata);
@@ -8590,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8590 .minor_version = 0, 8597 .minor_version = 0,
8591 .init_caps = NFS_CAP_READDIRPLUS 8598 .init_caps = NFS_CAP_READDIRPLUS
8592 | NFS_CAP_ATOMIC_OPEN 8599 | NFS_CAP_ATOMIC_OPEN
8593 | NFS_CAP_CHANGE_ATTR
8594 | NFS_CAP_POSIX_LOCK, 8600 | NFS_CAP_POSIX_LOCK,
8595 .init_client = nfs40_init_client, 8601 .init_client = nfs40_init_client,
8596 .shutdown_client = nfs40_shutdown_client, 8602 .shutdown_client = nfs40_shutdown_client,
@@ -8616,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8616 .minor_version = 1, 8622 .minor_version = 1,
8617 .init_caps = NFS_CAP_READDIRPLUS 8623 .init_caps = NFS_CAP_READDIRPLUS
8618 | NFS_CAP_ATOMIC_OPEN 8624 | NFS_CAP_ATOMIC_OPEN
8619 | NFS_CAP_CHANGE_ATTR
8620 | NFS_CAP_POSIX_LOCK 8625 | NFS_CAP_POSIX_LOCK
8621 | NFS_CAP_STATEID_NFSV41 8626 | NFS_CAP_STATEID_NFSV41
8622 | NFS_CAP_ATOMIC_OPEN_V1, 8627 | NFS_CAP_ATOMIC_OPEN_V1,
@@ -8639,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8639 .minor_version = 2, 8644 .minor_version = 2,
8640 .init_caps = NFS_CAP_READDIRPLUS 8645 .init_caps = NFS_CAP_READDIRPLUS
8641 | NFS_CAP_ATOMIC_OPEN 8646 | NFS_CAP_ATOMIC_OPEN
8642 | NFS_CAP_CHANGE_ATTR
8643 | NFS_CAP_POSIX_LOCK 8647 | NFS_CAP_POSIX_LOCK
8644 | NFS_CAP_STATEID_NFSV41 8648 | NFS_CAP_STATEID_NFSV41
8645 | NFS_CAP_ATOMIC_OPEN_V1 8649 | NFS_CAP_ATOMIC_OPEN_V1
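Earlier in this fs/nfs/nfs4proc.c diff, nfs4_opendata_alloc() gains an extra a_label allocation and, with it, one more rung on the goto-unwind ladder: each allocation has a label that frees everything allocated before it, so error paths stay in strict reverse allocation order. A minimal userspace sketch of that cleanup ladder, with illustrative field names rather than the real opendata layout:

#include <stdlib.h>

struct opendata_sketch {
    void *f_label;
    void *a_label;
    void *seqid;
};

struct opendata_sketch *opendata_alloc(void)
{
    struct opendata_sketch *p = calloc(1, sizeof(*p));

    if (!p)
        return NULL;

    p->f_label = malloc(16);
    if (!p->f_label)
        goto err_free_p;

    p->a_label = malloc(16);          /* the newly added allocation */
    if (!p->a_label)
        goto err_free_f;

    p->seqid = malloc(16);
    if (!p->seqid)
        goto err_free_label;

    return p;

err_free_label:
    free(p->a_label);                 /* unwind in reverse allocation order */
err_free_f:
    free(p->f_label);
err_free_p:
    free(p);
    return NULL;
}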
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 605840dc89cf..f2e2ad894461 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
2191 } 2191 }
2192} 2192}
2193 2193
2194static void nfs41_handle_state_revoked(struct nfs_client *clp) 2194static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
2195{ 2195{
2196 nfs4_reset_all_state(clp); 2196 nfs4_reset_all_state(clp);
2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); 2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2198} 2198}
2199 2199
2200static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
2201{
2202 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
2203 nfs4_schedule_state_manager(clp);
2204
2205 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2206}
2207
2200static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 2208static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
2201{ 2209{
2202 /* This will need to handle layouts too */ 2210 /* FIXME: For now, we destroy all layouts. */
2203 nfs_expire_all_delegations(clp); 2211 pnfs_destroy_all_layouts(clp);
2212 /* FIXME: For now, we test all delegations+open state+locks. */
2213 nfs41_handle_some_state_revoked(clp);
2204 dprintk("%s: Recallable state revoked on server %s!\n", __func__, 2214 dprintk("%s: Recallable state revoked on server %s!\n", __func__,
2205 clp->cl_hostname); 2215 clp->cl_hostname);
2206} 2216}
2207 2217
2208static void nfs41_handle_backchannel_fault(struct nfs_client *clp) 2218static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
2209{ 2219{
2210 nfs_expire_all_delegations(clp); 2220 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2211 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 2221 nfs4_schedule_state_manager(clp);
2212 nfs4_schedule_state_manager(clp); 2222
2213 dprintk("%s: server %s declared a backchannel fault\n", __func__, 2223 dprintk("%s: server %s declared a backchannel fault\n", __func__,
2214 clp->cl_hostname); 2224 clp->cl_hostname);
2215} 2225}
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
2231 2241
2232 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 2242 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2233 nfs41_handle_server_reboot(clp); 2243 nfs41_handle_server_reboot(clp);
2234 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 2244 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
2235 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 2245 nfs41_handle_all_state_revoked(clp);
2246 if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2236 SEQ4_STATUS_ADMIN_STATE_REVOKED)) 2247 SEQ4_STATUS_ADMIN_STATE_REVOKED))
2237 nfs41_handle_state_revoked(clp); 2248 nfs41_handle_some_state_revoked(clp);
2238 if (flags & SEQ4_STATUS_LEASE_MOVED) 2249 if (flags & SEQ4_STATUS_LEASE_MOVED)
2239 nfs4_schedule_lease_moved_recovery(clp); 2250 nfs4_schedule_lease_moved_recovery(clp);
2240 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 2251 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1da68d3b1eda..4984bbe55ff1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1100 mirror->pg_base = 0; 1100 mirror->pg_base = 0;
1101 mirror->pg_recoalesce = 0; 1101 mirror->pg_recoalesce = 0;
1102 1102
1103 desc->pg_moreio = 0;
1104
1105 while (!list_empty(&head)) { 1103 while (!list_empty(&head)) {
1106 struct nfs_page *req; 1104 struct nfs_page *req;
1107 1105
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1109 nfs_list_remove_request(req); 1107 nfs_list_remove_request(req);
1110 if (__nfs_pageio_add_request(desc, req)) 1108 if (__nfs_pageio_add_request(desc, req))
1111 continue; 1109 continue;
1112 if (desc->pg_error < 0) 1110 if (desc->pg_error < 0) {
1111 list_splice_tail(&head, &mirror->pg_list);
1112 mirror->pg_recoalesce = 1;
1113 return 0; 1113 return 0;
1114 }
1114 break; 1115 break;
1115 } 1116 }
1116 } while (mirror->pg_recoalesce); 1117 } while (mirror->pg_recoalesce);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0ba9a02c9566..70bf706b1090 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
352{ 352{
353 struct pnfs_layout_segment *s; 353 struct pnfs_layout_segment *s;
354 354
355 if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) 355 if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
356 return false; 356 return false;
357 357
358 list_for_each_entry(s, &lo->plh_segs, pls_list) 358 list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
362 return true; 362 return true;
363} 363}
364 364
365static bool
366pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
367{
368 if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
369 return false;
370 lo->plh_return_iomode = 0;
371 lo->plh_block_lgets++;
372 pnfs_get_layout_hdr(lo);
373 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
374 return true;
375}
376
365static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, 377static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
366 struct pnfs_layout_hdr *lo, struct inode *inode) 378 struct pnfs_layout_hdr *lo, struct inode *inode)
367{ 379{
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
372 if (pnfs_layout_need_return(lo, lseg)) { 384 if (pnfs_layout_need_return(lo, lseg)) {
373 nfs4_stateid stateid; 385 nfs4_stateid stateid;
374 enum pnfs_iomode iomode; 386 enum pnfs_iomode iomode;
387 bool send;
375 388
376 stateid = lo->plh_stateid; 389 stateid = lo->plh_stateid;
377 iomode = lo->plh_return_iomode; 390 iomode = lo->plh_return_iomode;
378 /* decreased in pnfs_send_layoutreturn() */ 391 send = pnfs_prepare_layoutreturn(lo);
379 lo->plh_block_lgets++;
380 lo->plh_return_iomode = 0;
381 spin_unlock(&inode->i_lock); 392 spin_unlock(&inode->i_lock);
382 pnfs_get_layout_hdr(lo); 393 if (send) {
383 394 /* Send an async layoutreturn so we dont deadlock */
384 /* Send an async layoutreturn so we dont deadlock */ 395 pnfs_send_layoutreturn(lo, stateid, iomode, false);
385 pnfs_send_layoutreturn(lo, stateid, iomode, false); 396 }
386 } else 397 } else
387 spin_unlock(&inode->i_lock); 398 spin_unlock(&inode->i_lock);
388} 399}
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
411 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); 422 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
412 423
413 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { 424 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
425 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
426 spin_unlock(&inode->i_lock);
427 return;
428 }
414 pnfs_get_layout_hdr(lo); 429 pnfs_get_layout_hdr(lo);
415 pnfs_layout_remove_lseg(lo, lseg); 430 pnfs_layout_remove_lseg(lo, lseg);
416 spin_unlock(&inode->i_lock); 431 spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
451 test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 466 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
452 if (atomic_dec_and_test(&lseg->pls_refcount)) { 467 if (atomic_dec_and_test(&lseg->pls_refcount)) {
453 struct pnfs_layout_hdr *lo = lseg->pls_layout; 468 struct pnfs_layout_hdr *lo = lseg->pls_layout;
469 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
470 return;
454 pnfs_get_layout_hdr(lo); 471 pnfs_get_layout_hdr(lo);
455 pnfs_layout_remove_lseg(lo, lseg); 472 pnfs_layout_remove_lseg(lo, lseg);
456 pnfs_free_lseg_async(lseg); 473 pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
924 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); 941 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
925 smp_mb__after_atomic(); 942 smp_mb__after_atomic();
926 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); 943 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
944 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
927} 945}
928 946
929static int 947static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
978 LIST_HEAD(tmp_list); 996 LIST_HEAD(tmp_list);
979 nfs4_stateid stateid; 997 nfs4_stateid stateid;
980 int status = 0, empty; 998 int status = 0, empty;
999 bool send;
981 1000
982 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); 1001 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
983 1002
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
1007 /* Don't send a LAYOUTRETURN if list was initially empty */ 1026 /* Don't send a LAYOUTRETURN if list was initially empty */
1008 if (empty) { 1027 if (empty) {
1009 spin_unlock(&ino->i_lock); 1028 spin_unlock(&ino->i_lock);
1010 pnfs_put_layout_hdr(lo);
1011 dprintk("NFS: %s no layout segments to return\n", __func__); 1029 dprintk("NFS: %s no layout segments to return\n", __func__);
1012 goto out; 1030 goto out_put_layout_hdr;
1013 } 1031 }
1014 1032
1015 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 1033 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1016 lo->plh_block_lgets++; 1034 send = pnfs_prepare_layoutreturn(lo);
1017 spin_unlock(&ino->i_lock); 1035 spin_unlock(&ino->i_lock);
1018 pnfs_free_lseg_list(&tmp_list); 1036 pnfs_free_lseg_list(&tmp_list);
1019 1037 if (send)
1020 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); 1038 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
1039out_put_layout_hdr:
1040 pnfs_put_layout_hdr(lo);
1021out: 1041out:
1022 dprintk("<-- %s status: %d\n", __func__, status); 1042 dprintk("<-- %s status: %d\n", __func__, status);
1023 return status; 1043 return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
1097out_noroc: 1117out_noroc:
1098 if (lo) { 1118 if (lo) {
1099 stateid = lo->plh_stateid; 1119 stateid = lo->plh_stateid;
1100 layoutreturn = 1120 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1101 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1121 &lo->plh_flags))
1102 &lo->plh_flags); 1122 layoutreturn = pnfs_prepare_layoutreturn(lo);
1103 if (layoutreturn) {
1104 lo->plh_block_lgets++;
1105 pnfs_get_layout_hdr(lo);
1106 }
1107 } 1123 }
1108 spin_unlock(&ino->i_lock); 1124 spin_unlock(&ino->i_lock);
1109 if (layoutreturn) { 1125 if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1146 struct pnfs_layout_segment *lseg; 1162 struct pnfs_layout_segment *lseg;
1147 nfs4_stateid stateid; 1163 nfs4_stateid stateid;
1148 u32 current_seqid; 1164 u32 current_seqid;
1149 bool found = false, layoutreturn = false; 1165 bool layoutreturn = false;
1150 1166
1151 spin_lock(&ino->i_lock); 1167 spin_lock(&ino->i_lock);
1152 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) 1168 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
1153 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { 1169 if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1154 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); 1170 continue;
1155 found = true; 1171 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
1156 goto out; 1172 continue;
1157 } 1173 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1174 spin_unlock(&ino->i_lock);
1175 return true;
1176 }
1158 lo = nfsi->layout; 1177 lo = nfsi->layout;
1159 current_seqid = be32_to_cpu(lo->plh_stateid.seqid); 1178 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
1160 1179
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1162 * a barrier, we choose the worst-case barrier. 1181 * a barrier, we choose the worst-case barrier.
1163 */ 1182 */
1164 *barrier = current_seqid + atomic_read(&lo->plh_outstanding); 1183 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
1165out: 1184 stateid = lo->plh_stateid;
1166 if (!found) { 1185 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1167 stateid = lo->plh_stateid; 1186 &lo->plh_flags))
1168 layoutreturn = 1187 layoutreturn = pnfs_prepare_layoutreturn(lo);
1169 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1188 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
1170 &lo->plh_flags); 1189 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1171 if (layoutreturn) { 1190
1172 lo->plh_block_lgets++;
1173 pnfs_get_layout_hdr(lo);
1174 }
1175 }
1176 spin_unlock(&ino->i_lock); 1191 spin_unlock(&ino->i_lock);
1177 if (layoutreturn) { 1192 if (layoutreturn) {
1178 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1179 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); 1193 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
1194 return true;
1180 } 1195 }
1181 return found; 1196 return false;
1182} 1197}
1183 1198
1184/* 1199/*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
1695 spin_lock(&inode->i_lock); 1710 spin_lock(&inode->i_lock);
1696 /* set failure bit so that pnfs path will be retried later */ 1711 /* set failure bit so that pnfs path will be retried later */
1697 pnfs_layout_set_fail_bit(lo, iomode); 1712 pnfs_layout_set_fail_bit(lo, iomode);
1698 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
1699 if (lo->plh_return_iomode == 0) 1713 if (lo->plh_return_iomode == 0)
1700 lo->plh_return_iomode = range.iomode; 1714 lo->plh_return_iomode = range.iomode;
1701 else if (lo->plh_return_iomode != range.iomode) 1715 else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2207 if (ld->prepare_layoutcommit) { 2221 if (ld->prepare_layoutcommit) {
2208 status = ld->prepare_layoutcommit(&data->args); 2222 status = ld->prepare_layoutcommit(&data->args);
2209 if (status) { 2223 if (status) {
2224 put_rpccred(data->cred);
2210 spin_lock(&inode->i_lock); 2225 spin_lock(&inode->i_lock);
2211 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); 2226 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2212 if (end_pos > nfsi->layout->plh_lwb) 2227 if (end_pos > nfsi->layout->plh_lwb)
2213 nfsi->layout->plh_lwb = end_pos; 2228 nfsi->layout->plh_lwb = end_pos;
2214 spin_unlock(&inode->i_lock); 2229 goto out_unlock;
2215 put_rpccred(data->cred);
2216 goto clear_layoutcommitting;
2217 } 2230 }
2218 } 2231 }
2219 2232
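The pnfs.c changes above funnel every LAYOUTRETURN through pnfs_prepare_layoutreturn(), which uses a test-and-set on NFS_LAYOUT_RETURN so that only one caller prepares and sends the return, and bundles the reference and plh_block_lgets bookkeeping behind that guard. Below is a hedged sketch of the same test-and-set ownership pattern using C11 atomics; the kernel performs the extra bookkeeping under the inode spinlock, which this sketch omits, and all names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>

struct layout_sketch {
    atomic_flag return_in_flight;   /* initialise with ATOMIC_FLAG_INIT; plays
                                       the role of the NFS_LAYOUT_RETURN bit  */
    int refcount;
    int block_lgets;
};

bool prepare_return(struct layout_sketch *lo)
{
    /* test-and-set returns the previous value, so "true" means another
     * caller already owns the return and we must not prepare it again. */
    if (atomic_flag_test_and_set(&lo->return_in_flight))
        return false;

    lo->block_lgets++;      /* block new layoutgets while the return is out */
    lo->refcount++;         /* keep the header alive for the async reply    */
    return true;
}

void complete_return(struct layout_sketch *lo)
{
    lo->block_lgets--;
    lo->refcount--;
    atomic_flag_clear(&lo->return_in_flight);  /* allow the next return */
}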
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 65869ca9c851..75a35a1afa79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1379{ 1379{
1380 struct nfs_pgio_args *argp = &hdr->args; 1380 struct nfs_pgio_args *argp = &hdr->args;
1381 struct nfs_pgio_res *resp = &hdr->res; 1381 struct nfs_pgio_res *resp = &hdr->res;
1382 u64 size = argp->offset + resp->count;
1382 1383
1383 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) 1384 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1385 fattr->size = size;
1386 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1387 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1384 return; 1388 return;
1385 if (argp->offset + resp->count != fattr->size) 1389 }
1386 return; 1390 if (size != fattr->size)
1387 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
1388 return; 1391 return;
1389 /* Set attribute barrier */ 1392 /* Set attribute barrier */
1390 nfs_fattr_set_barrier(fattr); 1393 nfs_fattr_set_barrier(fattr);
1394 /* ...and update size */
1395 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1391} 1396}
1392 1397
1393void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) 1398void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1394{ 1399{
1395 struct nfs_fattr *fattr = hdr->res.fattr; 1400 struct nfs_fattr *fattr = &hdr->fattr;
1396 struct inode *inode = hdr->inode; 1401 struct inode *inode = hdr->inode;
1397 1402
1398 if (fattr == NULL)
1399 return;
1400 spin_lock(&inode->i_lock); 1403 spin_lock(&inode->i_lock);
1401 nfs_writeback_check_extend(hdr, fattr); 1404 nfs_writeback_check_extend(hdr, fattr);
1402 nfs_post_op_update_inode_force_wcc_locked(inode, fattr); 1405 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6904213a4363..ebf90e487c75 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
212 BUG_ON(!ls->ls_file); 212 BUG_ON(!ls->ls_file);
213 213
214 if (nfsd4_layout_setlease(ls)) { 214 if (nfsd4_layout_setlease(ls)) {
215 fput(ls->ls_file);
215 put_nfs4_file(fp); 216 put_nfs4_file(fp);
216 kmem_cache_free(nfs4_layout_stateid_cache, ls); 217 kmem_cache_free(nfs4_layout_stateid_cache, ls);
217 return NULL; 218 return NULL;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 61dfb33f0559..95202719a1fd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
4396 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 4396 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4397} 4397}
4398 4398
4399static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 4399static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4400{ 4400{
4401 if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) 4401 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4402 return nfserr_bad_stateid; 4402 return nfserr_bad_stateid;
4403 return nfs_ok; 4403 return nfs_ok;
4404} 4404}
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
4601{ 4601{
4602 __be32 status; 4602 __be32 status;
4603 4603
4604 status = nfs4_check_fh(fhp, ols);
4605 if (status)
4606 return status;
4607 status = nfsd4_check_openowner_confirmed(ols); 4604 status = nfsd4_check_openowner_confirmed(ols);
4608 if (status) 4605 if (status)
4609 return status; 4606 return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
4690 status = nfserr_bad_stateid; 4687 status = nfserr_bad_stateid;
4691 break; 4688 break;
4692 } 4689 }
4690 if (status)
4691 goto out;
4692 status = nfs4_check_fh(fhp, s);
4693 4693
4694done: 4694done:
4695 if (!status && filpp) 4695 if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4798 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 4798 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4799 if (status) 4799 if (status)
4800 return status; 4800 return status;
4801 return nfs4_check_fh(current_fh, stp); 4801 return nfs4_check_fh(current_fh, &stp->st_stid);
4802} 4802}
4803 4803
4804/* 4804/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 54633858733a..75e0563c09d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
2143#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ 2143#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
2144 FATTR4_WORD0_RDATTR_ERROR) 2144 FATTR4_WORD0_RDATTR_ERROR)
2145#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID 2145#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
2146#define WORD2_ABSENT_FS_ATTRS 0
2146 2147
2147#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 2148#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2148static inline __be32 2149static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
2171{ return 0; } 2172{ return 0; }
2172#endif 2173#endif
2173 2174
2174static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) 2175static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
2175{ 2176{
2176 /* As per referral draft: */ 2177 /* As per referral draft: */
2177 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || 2178 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
2184 } 2185 }
2185 *bmval0 &= WORD0_ABSENT_FS_ATTRS; 2186 *bmval0 &= WORD0_ABSENT_FS_ATTRS;
2186 *bmval1 &= WORD1_ABSENT_FS_ATTRS; 2187 *bmval1 &= WORD1_ABSENT_FS_ATTRS;
2188 *bmval2 &= WORD2_ABSENT_FS_ATTRS;
2187 return 0; 2189 return 0;
2188} 2190}
2189 2191
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2246 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); 2248 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
2247 2249
2248 if (exp->ex_fslocs.migrated) { 2250 if (exp->ex_fslocs.migrated) {
2249 BUG_ON(bmval[2]); 2251 status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
2250 status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
2251 if (status) 2252 if (status)
2252 goto out; 2253 goto out;
2253 } 2254 }
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2286 } 2287 }
2287 2288
2288#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 2289#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2289 if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || 2290 if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
2290 bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { 2291 bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
2291 err = security_inode_getsecctx(d_inode(dentry), 2292 err = security_inode_getsecctx(d_inode(dentry),
2292 &context, &contextlen); 2293 &context, &contextlen);
2293 contextsupport = (err == 0); 2294 contextsupport = (err == 0);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 9a20e513d7eb..aba43811d6ef 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -1369,7 +1369,6 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1369 case NILFS_IOCTL_SYNC: 1369 case NILFS_IOCTL_SYNC:
1370 case NILFS_IOCTL_RESIZE: 1370 case NILFS_IOCTL_RESIZE:
1371 case NILFS_IOCTL_SET_ALLOC_RANGE: 1371 case NILFS_IOCTL_SET_ALLOC_RANGE:
1372 case FITRIM:
1373 break; 1372 break;
1374 default: 1373 default:
1375 return -ENOIOCTLCMD; 1374 return -ENOIOCTLCMD;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 92e48c70f0f0..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
412 unsigned int flags) 412 unsigned int flags)
413{ 413{
414 struct fsnotify_mark *lmark, *mark; 414 struct fsnotify_mark *lmark, *mark;
415 LIST_HEAD(to_free);
415 416
417 /*
418 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
419 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
420 * to_free list so we have to use mark_mutex even when accessing that
421 * list. And freeing mark requires us to drop mark_mutex. So we can
422 * reliably free only the first mark in the list. That's why we first
423 * move marks to free to to_free list in one go and then free marks in
424 * to_free list one by one.
425 */
416 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); 426 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
417 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { 427 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
418 if (mark->flags & flags) { 428 if (mark->flags & flags)
419 fsnotify_get_mark(mark); 429 list_move(&mark->g_list, &to_free);
420 fsnotify_destroy_mark_locked(mark, group);
421 fsnotify_put_mark(mark);
422 }
423 } 430 }
424 mutex_unlock(&group->mark_mutex); 431 mutex_unlock(&group->mark_mutex);
432
433 while (1) {
434 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
435 if (list_empty(&to_free)) {
436 mutex_unlock(&group->mark_mutex);
437 break;
438 }
439 mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
440 fsnotify_get_mark(mark);
441 fsnotify_destroy_mark_locked(mark, group);
442 mutex_unlock(&group->mark_mutex);
443 fsnotify_put_mark(mark);
444 }
425} 445}
426 446
427/* 447/*
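The fs/notify/mark.c hunk above switches to a two-phase teardown: matching marks are first moved onto a private to_free list in one pass under mark_mutex, then destroyed one at a time, re-taking the mutex for each pop because fsnotify_destroy_mark_locked() has to be able to drop it. Here is a compact userspace analogue of that shape with a plain singly linked list and a pthread mutex; in this sketch to_free is a local variable, so the re-locking in phase two only mirrors the kernel's need and is not strictly required here.

#include <pthread.h>
#include <stdlib.h>

struct mark {
    struct mark *next;
    int flags;
};

struct group {
    pthread_mutex_t mutex;
    struct mark *marks;            /* all marks owned by the group */
};

void destroy_mark(struct mark *m) { free(m); }   /* must run unlocked */

void clear_marks_by_flags(struct group *g, int flags)
{
    struct mark *to_free = NULL, **pp, *m;

    /* Phase 1: move every matching mark to the private list in one pass. */
    pthread_mutex_lock(&g->mutex);
    for (pp = &g->marks; (m = *pp) != NULL; ) {
        if (m->flags & flags) {
            *pp = m->next;         /* unlink from the group list */
            m->next = to_free;
            to_free = m;
        } else {
            pp = &m->next;
        }
    }
    pthread_mutex_unlock(&g->mutex);

    /* Phase 2: pop and destroy one entry at a time, re-taking the lock
     * for each pop, since destruction itself must run without it. */
    for (;;) {
        pthread_mutex_lock(&g->mutex);
        m = to_free;
        if (!m) {
            pthread_mutex_unlock(&g->mutex);
            break;
        }
        to_free = m->next;
        pthread_mutex_unlock(&g->mutex);
        destroy_mark(m);
    }
}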
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
685 685
686 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { 686 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
687 u64 s = i_size_read(inode); 687 u64 s = i_size_read(inode);
688 sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + 688 sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
689 (do_div(s, osb->s_clustersize) >> 9); 689 (do_div(s, osb->s_clustersize) >> 9);
690 690
691 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, 691 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
910 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); 910 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
911 911
912 ret = blkdev_issue_zeroout(osb->sb->s_bdev, 912 ret = blkdev_issue_zeroout(osb->sb->s_bdev,
913 p_cpos << (osb->s_clustersize_bits - 9), 913 (u64)p_cpos << (osb->s_clustersize_bits - 9),
914 zero_len_head >> 9, GFP_NOFS, false); 914 zero_len_head >> 9, GFP_NOFS, false);
915 if (ret < 0) 915 if (ret < 0)
916 mlog_errno(ret); 916 mlog_errno(ret);
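Both ocfs2 hunks above fix the same 32-bit overflow: p_cpos is a 32-bit cluster number, so it has to be widened to u64 before the left shift into a sector number, not after. A tiny standalone program showing the difference; the specific values are only illustrative (roughly a 64 TiB offset with 2 MiB clusters).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t p_cpos = 0x02000000u;   /* cluster number whose shifted value
                                        no longer fits in 32 bits          */
    int shift = 12;                  /* e.g. clustersize_bits 21 minus 9   */

    uint64_t wrong = (uint64_t)(p_cpos << shift);   /* shift done in 32 bits: high bits lost */
    uint64_t right = (uint64_t)p_cpos << shift;     /* widened first: correct sector number  */

    printf("wrong=%llu right=%llu\n",
           (unsigned long long)wrong, (unsigned long long)right);
    return 0;
}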
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4025 osb->dc_work_sequence = osb->dc_wake_sequence; 4025 osb->dc_work_sequence = osb->dc_wake_sequence;
4026 4026
4027 processed = osb->blocked_lock_count; 4027 processed = osb->blocked_lock_count;
4028 while (processed) { 4028 /*
4029 BUG_ON(list_empty(&osb->blocked_lock_list)); 4029 * blocked lock processing in this loop might call iput which can
4030 4030 * remove items off osb->blocked_lock_list. Downconvert up to
4031 * 'processed' number of locks, but stop short if we had some
4032 * removed in ocfs2_mark_lockres_freeing when downconverting.
4033 */
4034 while (processed && !list_empty(&osb->blocked_lock_list)) {
4031 lockres = list_entry(osb->blocked_lock_list.next, 4035 lockres = list_entry(osb->blocked_lock_list.next,
4032 struct ocfs2_lock_res, l_blocked_list); 4036 struct ocfs2_lock_res, l_blocked_list);
4033 list_del_init(&lockres->l_blocked_list); 4037 list_del_init(&lockres->l_blocked_list);
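The dlmglue.c fix above keeps the downconvert loop bounded by the snapshot count but also re-checks that the blocked list is non-empty on every iteration, because processing one lock can drop references and pull other locks off the list. A minimal sketch of that loop-guard shape, assuming nothing more than a trivial singly linked list:

#include <stddef.h>

struct node { struct node *next; };

void drain_up_to(struct node **head, int budget)
{
    /* Stop on whichever comes first: the work budget or an empty list;
     * processing an item may remove others, so re-check *head each time. */
    while (budget && *head != NULL) {
        struct node *n = *head;
        *head = n->next;
        /* ... process n here ... */
        budget--;
    }
}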
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 53e6c40ed4c6..3cb097ccce60 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -980,7 +980,6 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
980 case OCFS2_IOC_GROUP_EXTEND: 980 case OCFS2_IOC_GROUP_EXTEND:
981 case OCFS2_IOC_GROUP_ADD: 981 case OCFS2_IOC_GROUP_ADD:
982 case OCFS2_IOC_GROUP_ADD64: 982 case OCFS2_IOC_GROUP_ADD64:
983 case FITRIM:
984 break; 983 break;
985 case OCFS2_IOC_REFLINK: 984 case OCFS2_IOC_REFLINK:
986 if (copy_from_user(&args, argp, sizeof(args))) 985 if (copy_from_user(&args, argp, sizeof(args)))
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index f140e3dbfb7b..d9da5a4e9382 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -343,6 +343,9 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
343 struct path realpath; 343 struct path realpath;
344 enum ovl_path_type type; 344 enum ovl_path_type type;
345 345
346 if (d_is_dir(dentry))
347 return d_backing_inode(dentry);
348
346 type = ovl_path_real(dentry, &realpath); 349 type = ovl_path_real(dentry, &realpath);
347 if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) { 350 if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
348 err = ovl_want_write(dentry); 351 err = ovl_want_write(dentry);
diff --git a/fs/pnode.h b/fs/pnode.h
index 7114ce6e6b9e..0fcdbe7ca648 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -20,8 +20,6 @@
20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) 20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) 21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) 22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
23#define IS_MNT_LOCKED_AND_LAZY(m) \
24 (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
25 23
26#define CL_EXPIRE 0x01 24#define CL_EXPIRE 0x01
27#define CL_SLAVE 0x02 25#define CL_SLAVE 0x02
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index d751fcb637bb..1ade1206bb89 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -75,3 +75,9 @@ config PROC_PAGE_MONITOR
75config PROC_CHILDREN 75config PROC_CHILDREN
76 bool "Include /proc/<pid>/task/<tid>/children file" 76 bool "Include /proc/<pid>/task/<tid>/children file"
77 default n 77 default n
78 help
79 Provides a fast way to retrieve first level children pids of a task. See
80 <file:Documentation/filesystems/proc.txt> for more information.
81
82 Say Y if you are running any user-space software which takes benefit from
83 this interface. For example, rkt is such a piece of software.
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 87782e874b6a..aa50d1ac28fc 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -243,6 +243,11 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
243 len1 = arg_end - arg_start; 243 len1 = arg_end - arg_start;
244 len2 = env_end - env_start; 244 len2 = env_end - env_start;
245 245
246 /* Empty ARGV. */
247 if (len1 == 0) {
248 rv = 0;
249 goto out_free_page;
250 }
246 /* 251 /*
247 * Inherently racy -- command line shares address space 252 * Inherently racy -- command line shares address space
248 * with code and data. 253 * with code and data.
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 91a4e6426321..92e6726f6e37 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -92,7 +92,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
92 roundup(sizeof(CORE_STR), 4)) + 92 roundup(sizeof(CORE_STR), 4)) +
93 roundup(sizeof(struct elf_prstatus), 4) + 93 roundup(sizeof(struct elf_prstatus), 4) +
94 roundup(sizeof(struct elf_prpsinfo), 4) + 94 roundup(sizeof(struct elf_prpsinfo), 4) +
95 roundup(sizeof(struct task_struct), 4); 95 roundup(arch_task_struct_size, 4);
96 *elf_buflen = PAGE_ALIGN(*elf_buflen); 96 *elf_buflen = PAGE_ALIGN(*elf_buflen);
97 return size + *elf_buflen; 97 return size + *elf_buflen;
98} 98}
@@ -415,7 +415,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
415 /* set up the task structure */ 415 /* set up the task structure */
416 notes[2].name = CORE_STR; 416 notes[2].name = CORE_STR;
417 notes[2].type = NT_TASKSTRUCT; 417 notes[2].type = NT_TASKSTRUCT;
418 notes[2].datasz = sizeof(struct task_struct); 418 notes[2].datasz = arch_task_struct_size;
419 notes[2].data = current; 419 notes[2].data = current;
420 420
421 nhdr->p_filesz += notesize(&notes[2]); 421 nhdr->p_filesz += notesize(&notes[2]);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
121 * Other callers might not initialize the si_lsb field, 121 * Other callers might not initialize the si_lsb field,
122 * so check explicitly for the right codes here. 122 * so check explicitly for the right codes here.
123 */ 123 */
124 if (kinfo->si_code == BUS_MCEERR_AR || 124 if (kinfo->si_signo == SIGBUS &&
125 kinfo->si_code == BUS_MCEERR_AO) 125 (kinfo->si_code == BUS_MCEERR_AR ||
126 kinfo->si_code == BUS_MCEERR_AO))
126 err |= __put_user((short) kinfo->si_addr_lsb, 127 err |= __put_user((short) kinfo->si_addr_lsb,
127 &uinfo->ssi_addr_lsb); 128 &uinfo->ssi_addr_lsb);
128#endif 129#endif
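The signalfd change above tightens a union access: si_addr_lsb only carries meaning for SIGBUS machine-check faults, and si_code values are reused across signals, so the signal number must be checked before the code. A small userspace sketch of the same predicate; the BUS_MCEERR_* values are defined locally (from the Linux uapi headers) in case the libc headers do not expose them.

#include <signal.h>
#include <stdbool.h>

#ifndef BUS_MCEERR_AR
#define BUS_MCEERR_AR 4
#define BUS_MCEERR_AO 5
#endif

/* Only trust si_addr_lsb when both the signal and the code say SIGBUS MCE. */
bool has_addr_lsb(const siginfo_t *si)
{
    return si->si_signo == SIGBUS &&
           (si->si_code == BUS_MCEERR_AR || si->si_code == BUS_MCEERR_AO);
}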
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6afac3d561ac..8d0b3ade0ff0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize - 1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1653 sizeof(struct unallocSpaceEntry)); 1653 sizeof(struct unallocSpaceEntry));
1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); 1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1655 use->descTag.tagLocation = 1655 crclen = sizeof(struct unallocSpaceEntry);
1656 cpu_to_le32(iinfo->i_location.logicalBlockNum);
1657 crclen = sizeof(struct unallocSpaceEntry) +
1658 iinfo->i_lenAlloc - sizeof(struct tag);
1659 use->descTag.descCRCLength = cpu_to_le16(crclen);
1660 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1661 sizeof(struct tag),
1662 crclen));
1663 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1664 1656
1665 goto out; 1657 goto finish;
1666 } 1658 }
1667 1659
1668 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) 1660 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1782 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); 1774 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1783 crclen = sizeof(struct extendedFileEntry); 1775 crclen = sizeof(struct extendedFileEntry);
1784 } 1776 }
1777
1778finish:
1785 if (iinfo->i_strat4096) { 1779 if (iinfo->i_strat4096) {
1786 fe->icbTag.strategyType = cpu_to_le16(4096); 1780 fe->icbTag.strategyType = cpu_to_le16(4096);
1787 fe->icbTag.strategyParameter = cpu_to_le16(1); 1781 fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1791 fe->icbTag.numEntries = cpu_to_le16(1); 1785 fe->icbTag.numEntries = cpu_to_le16(1);
1792 } 1786 }
1793 1787
1794 if (S_ISDIR(inode->i_mode)) 1788 if (iinfo->i_use)
1789 fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1790 else if (S_ISDIR(inode->i_mode))
1795 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; 1791 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1796 else if (S_ISREG(inode->i_mode)) 1792 else if (S_ISREG(inode->i_mode))
1797 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; 1793 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1828 crclen)); 1824 crclen));
1829 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); 1825 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1830 1826
1831out:
1832 set_buffer_uptodate(bh); 1827 set_buffer_uptodate(bh);
1833 unlock_buffer(bh); 1828 unlock_buffer(bh);
1834 1829
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 20de88d1bf86..dd714037c322 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
159 struct xfs_buf *bp) 159 struct xfs_buf *bp)
160{ 160{
161 struct xfs_mount *mp = bp->b_target->bt_mount; 161 struct xfs_mount *mp = bp->b_target->bt_mount;
162 struct xfs_buf_log_item *bip = bp->b_fspriv; 162 int blksize = mp->m_attr_geo->blksize;
163 char *ptr; 163 char *ptr;
164 int len; 164 int len;
165 xfs_daddr_t bno; 165 xfs_daddr_t bno;
166 int blksize = mp->m_attr_geo->blksize;
167 166
168 /* no verification of non-crc buffers */ 167 /* no verification of non-crc buffers */
169 if (!xfs_sb_version_hascrc(&mp->m_sb)) 168 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
175 ASSERT(len >= blksize); 174 ASSERT(len >= blksize);
176 175
177 while (len > 0) { 176 while (len > 0) {
177 struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
178
178 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { 179 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
179 xfs_buf_ioerror(bp, -EFSCORRUPTED); 180 xfs_buf_ioerror(bp, -EFSCORRUPTED);
180 xfs_verifier_error(bp); 181 xfs_verifier_error(bp);
181 return; 182 return;
182 } 183 }
183 if (bip) {
184 struct xfs_attr3_rmt_hdr *rmt;
185 184
186 rmt = (struct xfs_attr3_rmt_hdr *)ptr; 185 /*
187 rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); 186 * Ensure we aren't writing bogus LSNs to disk. See
187 * xfs_attr3_rmt_hdr_set() for the explanation.
188 */
189 if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
190 xfs_buf_ioerror(bp, -EFSCORRUPTED);
191 xfs_verifier_error(bp);
192 return;
188 } 193 }
189 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF); 194 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
190 195
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
221 rmt->rm_owner = cpu_to_be64(ino); 226 rmt->rm_owner = cpu_to_be64(ino);
222 rmt->rm_blkno = cpu_to_be64(bno); 227 rmt->rm_blkno = cpu_to_be64(bno);
223 228
229 /*
230 * Remote attribute blocks are written synchronously, so we don't
231 * have an LSN that we can stamp in them that makes any sense to log
232 * recovery. To ensure that log recovery handles overwrites of these
233 * blocks sanely (i.e. once they've been freed and reallocated as some
234 * other type of metadata) we need to ensure that the LSN has a value
235 * that tells log recovery to ignore the LSN and overwrite the buffer
 236 * with whatever is in its log. To do this, we use the magic
237 * NULLCOMMITLSN to indicate that the LSN is invalid.
238 */
239 rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
240
224 return sizeof(struct xfs_attr3_rmt_hdr); 241 return sizeof(struct xfs_attr3_rmt_hdr);
225} 242}
226 243
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
434 451
435 /* 452 /*
436 * Allocate a single extent, up to the size of the value. 453 * Allocate a single extent, up to the size of the value.
454 *
455 * Note that we have to consider this a data allocation as we
456 * write the remote attribute without logging the contents.
457 * Hence we must ensure that we aren't using blocks that are on
458 * the busy list so that we don't overwrite blocks which have
459 * recently been freed but their transactions are not yet
460 * committed to disk. If we overwrite the contents of a busy
461 * extent and then crash then the block may not contain the
462 * correct metadata after log recovery occurs.
437 */ 463 */
438 xfs_bmap_init(args->flist, args->firstblock); 464 xfs_bmap_init(args->flist, args->firstblock);
439 nmap = 1; 465 nmap = 1;
440 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, 466 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
441 blkcnt, 467 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
442 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 468 args->total, &map, &nmap, args->flist);
443 args->firstblock, args->total, &map, &nmap,
444 args->flist);
445 if (!error) { 469 if (!error) {
446 error = xfs_bmap_finish(&args->trans, args->flist, 470 error = xfs_bmap_finish(&args->trans, args->flist,
447 &committed); 471 &committed);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f0e8249722d4..db4acc1c3e73 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
1514 struct vm_area_struct *vma, 1514 struct vm_area_struct *vma,
1515 struct vm_fault *vmf) 1515 struct vm_fault *vmf)
1516{ 1516{
1517 struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file)); 1517 struct inode *inode = file_inode(vma->vm_file);
1518 int ret; 1518 int ret;
1519 1519
1520 trace_xfs_filemap_fault(ip); 1520 trace_xfs_filemap_fault(XFS_I(inode));
1521 1521
1522 /* DAX can shortcut the normal fault path on write faults! */ 1522 /* DAX can shortcut the normal fault path on write faults! */
1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip))) 1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1524 return xfs_filemap_page_mkwrite(vma, vmf); 1524 return xfs_filemap_page_mkwrite(vma, vmf);
1525 1525
1526 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); 1526 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1527 ret = filemap_fault(vma, vmf); 1527 if (IS_DAX(inode)) {
1528 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); 1528 /*
1529 * we do not want to trigger unwritten extent conversion on read
1530 * faults - that is unnecessary overhead and would also require
1531 * changes to xfs_get_blocks_direct() to map unwritten extent
1532 * ioend for conversion on read-only mappings.
1533 */
1534 ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
1535 } else
1536 ret = filemap_fault(vma, vmf);
1537 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1529 1538
1530 return ret; 1539 return ret;
1531} 1540}
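
For context, the fault handler above is reached through the file's vm_operations_struct, so both page-cache and DAX mappings funnel through it. A hedged sketch of that wiring (foo_vm_ops is illustrative and not copied from this patch; the handlers are static to xfs_file.c, so this only makes sense in that file):

static const struct vm_operations_struct foo_vm_ops = {
	.fault		= xfs_filemap_fault,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
};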
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 01dd228ca05e..480ebba8464f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; 1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
1887 break; 1887 break;
1888 case XFS_ATTR3_RMT_MAGIC: 1888 case XFS_ATTR3_RMT_MAGIC:
1889 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 1889 /*
1890 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; 1890 * Remote attr blocks are written synchronously, rather than
1891 break; 1891 * being logged. That means they do not contain a valid LSN
1892 * (i.e. transactionally ordered) in them, and hence any time we
1893 * see a buffer to replay over the top of a remote attribute
1894 * block we should simply do so.
1895 */
1896 goto recover_immediately;
1892 case XFS_SB_MAGIC: 1897 case XFS_SB_MAGIC:
1893 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 1898 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
1894 uuid = &((struct xfs_dsb *)blk)->sb_uuid; 1899 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
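
Taken together, the three XFS hunks above form a small contract: remote attribute blocks are stamped with NULLCOMMITLSN when written, the write verifier rejects anything else, and log recovery always replays logged buffers over them. A condensed, hypothetical summary of the recovery-side decision (foo_should_replay is illustrative only; the real code compares LSNs with XFS_LSN_CMP):

static bool foo_should_replay(uint32_t magic, xfs_lsn_t disk_lsn,
			      xfs_lsn_t log_lsn)
{
	/* Remote attr blocks carry no usable ordering information. */
	if (magic == XFS_ATTR3_RMT_MAGIC)
		return true;

	/* Otherwise replay when the on-disk copy was never stamped with
	 * a commit LSN, or is older than the logged buffer. */
	return disk_lsn == NULLCOMMITLSN || disk_lsn < log_lsn;
}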
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644
index 000000000000..5ff0e5193f85
--- /dev/null
+++ b/include/asm-generic/mm-arch-hooks.h
@@ -0,0 +1,16 @@
1/*
2 * Architecture specific mm hooks
3 */
4
5#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
6#define _ASM_GENERIC_MM_ARCH_HOOKS_H
7
8/*
9 * This file should be included through arch/../include/asm/Kbuild for
 10 * architectures that don't need specific mm hooks.
11 *
12 * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
13 * are used.
14 */
15
16#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 48db6a56975f..5aa519711e0b 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
691 struct timer_list disable_timer; /* delayed disable timer */ 691 struct timer_list disable_timer; /* delayed disable timer */
692 692
693 /* vblank counter, protected by dev->vblank_time_lock for writes */ 693 /* vblank counter, protected by dev->vblank_time_lock for writes */
694 unsigned long count; 694 u32 count;
695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */ 695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */
696 struct timeval time[DRM_VBLANKTIME_RBSIZE]; 696 struct timeval time[DRM_VBLANKTIME_RBSIZE];
697 697
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 57ca8cc383a6..3b4d8a4a23fb 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -743,8 +743,6 @@ struct drm_connector {
743 uint8_t num_h_tile, num_v_tile; 743 uint8_t num_h_tile, num_v_tile;
744 uint8_t tile_h_loc, tile_v_loc; 744 uint8_t tile_h_loc, tile_v_loc;
745 uint16_t tile_h_size, tile_v_size; 745 uint16_t tile_h_size, tile_v_size;
746
747 struct list_head destroy_list;
748}; 746};
749 747
750/** 748/**
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c8fc187061de..918aa68b5199 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
168 * @get_modes: get mode list for this connector 168 * @get_modes: get mode list for this connector
169 * @mode_valid: is this mode valid on the given connector? (optional) 169 * @mode_valid: is this mode valid on the given connector? (optional)
170 * @best_encoder: return the preferred encoder for this connector 170 * @best_encoder: return the preferred encoder for this connector
171 * @atomic_best_encoder: atomic version of @best_encoder
171 * 172 *
172 * The helper operations are called by the mid-layer CRTC helper. 173 * The helper operations are called by the mid-layer CRTC helper.
173 */ 174 */
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
176 enum drm_mode_status (*mode_valid)(struct drm_connector *connector, 177 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
177 struct drm_display_mode *mode); 178 struct drm_display_mode *mode);
178 struct drm_encoder *(*best_encoder)(struct drm_connector *connector); 179 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
180 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
181 struct drm_connector_state *connector_state);
179}; 182};
180 183
181extern void drm_helper_disable_unused_functions(struct drm_device *dev); 184extern void drm_helper_disable_unused_functions(struct drm_device *dev);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 799050198323..53c53c459b15 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -348,6 +348,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
348} 348}
349 349
350/** 350/**
351 * drm_eld_sad - Get ELD SAD structures.
352 * @eld: pointer to an eld memory structure with sad_count set
353 */
354static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
355{
356 unsigned int ver, mnl;
357
358 ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
359 if (ver != 2 && ver != 31)
360 return NULL;
361
362 mnl = drm_eld_mnl(eld);
363 if (mnl > 16)
364 return NULL;
365
366 return eld + DRM_ELD_CEA_SAD(mnl, 0);
367}
368
369/**
351 * drm_eld_sad_count - Get ELD SAD count. 370 * drm_eld_sad_count - Get ELD SAD count.
352 * @eld: pointer to an eld memory structure with sad_count set 371 * @eld: pointer to an eld memory structure with sad_count set
353 */ 372 */
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 176 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 177 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 178 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c471dfc93b71..d2445fa9999f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -58,6 +58,19 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
58 acpi_fwnode_handle(adev) : NULL) 58 acpi_fwnode_handle(adev) : NULL)
59#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) 59#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
60 60
61/**
62 * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
63 * the PCI-defined class-code information
64 *
65 * @_cls : the class, subclass, prog-if triple for this device
66 * @_msk : the class mask for this device
67 *
68 * This macro is used to create a struct acpi_device_id that matches a
69 * specific PCI class. The .id and .driver_data fields will be left
70 * initialized with the default value.
71 */
72#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
73
61static inline bool has_acpi_companion(struct device *dev) 74static inline bool has_acpi_companion(struct device *dev)
62{ 75{
63 return is_acpi_node(dev->fwnode); 76 return is_acpi_node(dev->fwnode);
@@ -309,9 +322,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
309 322
310int acpi_resources_are_enforced(void); 323int acpi_resources_are_enforced(void);
311 324
312int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
313 unsigned long flags, char *desc);
314
315#ifdef CONFIG_HIBERNATION 325#ifdef CONFIG_HIBERNATION
316void __init acpi_no_s4_hw_signature(void); 326void __init acpi_no_s4_hw_signature(void);
317#endif 327#endif
@@ -446,6 +456,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *);
446#define ACPI_COMPANION(dev) (NULL) 456#define ACPI_COMPANION(dev) (NULL)
447#define ACPI_COMPANION_SET(dev, adev) do { } while (0) 457#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
448#define ACPI_HANDLE(dev) (NULL) 458#define ACPI_HANDLE(dev) (NULL)
459#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
449 460
450struct fwnode_handle; 461struct fwnode_handle;
451 462
@@ -507,13 +518,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
507 return 0; 518 return 0;
508} 519}
509 520
510static inline int acpi_reserve_region(u64 start, unsigned int length,
511 u8 space_id, unsigned long flags,
512 char *desc)
513{
514 return -ENXIO;
515}
516
517struct acpi_table_header; 521struct acpi_table_header;
518static inline int acpi_table_parse(char *id, 522static inline int acpi_table_parse(char *id,
519 int (*handler)(struct acpi_table_header *)) 523 int (*handler)(struct acpi_table_header *))
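
The new ACPI_DEVICE_CLASS() initializer pairs with the cls/cls_msk fields added to struct acpi_device_id in the mod_devicetable.h hunk further down. A hypothetical driver match table that binds by PCI class code rather than by _HID might look like this (the foo_* names are illustrative):

/* Match any device advertising the AHCI class code
 * (PCI_CLASS_STORAGE_SATA_AHCI == 0x010601) instead of a specific _HID. */
static const struct acpi_device_id foo_acpi_match[] = {
	{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
	{ }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);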
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h
index c7df89f99115..58fe9e8b6fd7 100644
--- a/include/linux/amba/sp810.h
+++ b/include/linux/amba/sp810.h
@@ -2,7 +2,7 @@
2 * ARM PrimeXsys System Controller SP810 header file 2 * ARM PrimeXsys System Controller SP810 header file
3 * 3 *
4 * Copyright (C) 2009 ST Microelectronics 4 * Copyright (C) 2009 ST Microelectronics
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <vireshk@kernel.org>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fed36418dd1c..d2992bfa1706 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -45,6 +45,7 @@ enum {
45 ATA_SECT_SIZE = 512, 45 ATA_SECT_SIZE = 512,
46 ATA_MAX_SECTORS_128 = 128, 46 ATA_MAX_SECTORS_128 = 128,
47 ATA_MAX_SECTORS = 256, 47 ATA_MAX_SECTORS = 256,
48 ATA_MAX_SECTORS_1024 = 1024,
48 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ 49 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
49 ATA_MAX_SECTORS_TAPE = 65535, 50 ATA_MAX_SECTORS_TAPE = 65535,
50 51
@@ -384,8 +385,6 @@ enum {
384 SATA_SSP = 0x06, /* Software Settings Preservation */ 385 SATA_SSP = 0x06, /* Software Settings Preservation */
385 SATA_DEVSLP = 0x09, /* Device Sleep */ 386 SATA_DEVSLP = 0x09, /* Device Sleep */
386 387
387 SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
388
389 /* feature values for SET_MAX */ 388 /* feature values for SET_MAX */
390 ATA_SET_MAX_ADDR = 0x00, 389 ATA_SET_MAX_ADDR = 0x00,
391 ATA_SET_MAX_PASSWD = 0x01, 390 ATA_SET_MAX_PASSWD = 0x01,
@@ -529,8 +528,6 @@ struct ata_bmdma_prd {
529#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20) 528#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
530#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4)) 529#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
531#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)) 530#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
532#define ata_id_has_ncq_autosense(id) \
533 ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
534 531
535static inline bool ata_id_has_hipm(const u16 *id) 532static inline bool ata_id_has_hipm(const u16 *id)
536{ 533{
@@ -719,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
719 return false; 716 return false;
720} 717}
721 718
722static inline bool ata_id_has_sense_reporting(const u16 *id)
723{
724 if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
725 return false;
726 return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
727}
728
729static inline bool ata_id_sense_reporting_enabled(const u16 *id)
730{
731 if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
732 return false;
733 return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
734}
735
736/** 719/**
737 * ata_id_major_version - get ATA level of drive 720 * ata_id_major_version - get ATA level of drive
738 * @id: Identify data 721 * @id: Identify data
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 58cfab80dd70..1b62d768c7df 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -47,6 +47,7 @@ struct blkcg {
47 47
48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS]; 48 struct blkcg_policy_data *pd[BLKCG_MAX_POLS];
49 49
50 struct list_head all_blkcgs_node;
50#ifdef CONFIG_CGROUP_WRITEBACK 51#ifdef CONFIG_CGROUP_WRITEBACK
51 struct list_head cgwb_list; 52 struct list_head cgwb_list;
52#endif 53#endif
@@ -88,18 +89,12 @@ struct blkg_policy_data {
88 * Policies that need to keep per-blkcg data which is independent 89 * Policies that need to keep per-blkcg data which is independent
89 * from any request_queue associated to it must specify its size 90 * from any request_queue associated to it must specify its size
90 * with the cpd_size field of the blkcg_policy structure and 91 * with the cpd_size field of the blkcg_policy structure and
91 * embed a blkcg_policy_data in it. blkcg core allocates 92 * embed a blkcg_policy_data in it. cpd_init() is invoked to let
92 * policy-specific per-blkcg structures lazily the first time 93 * each policy handle per-blkcg data.
93 * they are actually needed, so it handles them together with
94 * blkgs. cpd_init() is invoked to let each policy handle
95 * per-blkcg data.
96 */ 94 */
97struct blkcg_policy_data { 95struct blkcg_policy_data {
98 /* the policy id this per-policy data belongs to */ 96 /* the policy id this per-policy data belongs to */
99 int plid; 97 int plid;
100
101 /* used during policy activation */
102 struct list_head alloc_node;
103}; 98};
104 99
105/* association between a blk cgroup and a request queue */ 100/* association between a blk cgroup and a request queue */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 73b45225a7ca..e6797ded700e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE); 317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
318} 318}
319 319
320
321static inline struct buffer_head *
322sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
323{
324 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
325}
326
320static inline struct buffer_head * 327static inline struct buffer_head *
321sb_find_get_block(struct super_block *sb, sector_t block) 328sb_find_get_block(struct super_block *sb, sector_t block)
322{ 329{
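
The new sb_getblk_gfp() helper simply exposes the gfp argument that sb_getblk() hard-codes, so existing behaviour is unchanged. A minimal sketch of the equivalence (foo_get_metadata_block() is hypothetical):

static struct buffer_head *foo_get_metadata_block(struct super_block *sb,
						  sector_t block)
{
	/* Equivalent to sb_getblk(sb, block); a caller with stricter
	 * allocation constraints can now pass different flags here. */
	return sb_getblk_gfp(sb, block, __GFP_MOVABLE);
}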
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index b6a52a4b457a..51bb6532785c 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -27,10 +27,12 @@
27/** 27/**
28 * struct can_skb_priv - private additional data inside CAN sk_buffs 28 * struct can_skb_priv - private additional data inside CAN sk_buffs
29 * @ifindex: ifindex of the first interface the CAN frame appeared on 29 * @ifindex: ifindex of the first interface the CAN frame appeared on
 30 * @skbcnt: atomic counter to have a unique id together with skb pointer
30 * @cf: align to the following CAN frame at skb->data 31 * @cf: align to the following CAN frame at skb->data
31 */ 32 */
32struct can_skb_priv { 33struct can_skb_priv {
33 int ifindex; 34 int ifindex;
35 int skbcnt;
34 struct can_frame cf[0]; 36 struct can_frame cf[0];
35}; 37};
36 38
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index e15499422fdc..37753278987a 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -8,6 +8,7 @@
8#include <linux/radix-tree.h> 8#include <linux/radix-tree.h>
9#include <linux/uio.h> 9#include <linux/uio.h>
10#include <linux/workqueue.h> 10#include <linux/workqueue.h>
11#include <net/net_namespace.h>
11 12
12#include <linux/ceph/types.h> 13#include <linux/ceph/types.h>
13#include <linux/ceph/buffer.h> 14#include <linux/ceph/buffer.h>
@@ -56,6 +57,7 @@ struct ceph_messenger {
56 struct ceph_entity_addr my_enc_addr; 57 struct ceph_entity_addr my_enc_addr;
57 58
58 atomic_t stopping; 59 atomic_t stopping;
60 possible_net_t net;
59 bool nocrc; 61 bool nocrc;
60 bool tcp_nodelay; 62 bool tcp_nodelay;
61 63
@@ -267,6 +269,7 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
267 u64 required_features, 269 u64 required_features,
268 bool nocrc, 270 bool nocrc,
269 bool tcp_nodelay); 271 bool tcp_nodelay);
272extern void ceph_messenger_fini(struct ceph_messenger *msgr);
270 273
271extern void ceph_con_init(struct ceph_connection *con, void *private, 274extern void ceph_con_init(struct ceph_connection *con, void *private,
272 const struct ceph_connection_operations *ops, 275 const struct ceph_connection_operations *ops,
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index a240b18e86fa..08bffcc466de 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -33,18 +33,19 @@ struct clk_lookup {
33 } 33 }
34 34
35struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, 35struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
36 const char *dev_fmt, ...); 36 const char *dev_fmt, ...) __printf(3, 4);
37 37
38void clkdev_add(struct clk_lookup *cl); 38void clkdev_add(struct clk_lookup *cl);
39void clkdev_drop(struct clk_lookup *cl); 39void clkdev_drop(struct clk_lookup *cl);
40 40
41struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id, 41struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
42 const char *dev_fmt, ...); 42 const char *dev_fmt, ...) __printf(3, 4);
43 43
44void clkdev_add_table(struct clk_lookup *, size_t); 44void clkdev_add_table(struct clk_lookup *, size_t);
45int clk_add_alias(const char *, const char *, const char *, struct device *); 45int clk_add_alias(const char *, const char *, const char *, struct device *);
46 46
47int clk_register_clkdev(struct clk *, const char *, const char *, ...); 47int clk_register_clkdev(struct clk *, const char *, const char *, ...)
48 __printf(3, 4);
48int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t); 49int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
49 50
50#ifdef CONFIG_COMMON_CLK 51#ifdef CONFIG_COMMON_CLK
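
Several prototypes in this section gain __printf() annotations. The macro (from the kernel's compiler headers) expands to gcc's format attribute, letting the compiler type-check variadic arguments against the format string; the __printf(n, 0) form is for va_list-taking functions. A brief sketch with a hypothetical foo_log():

/* Roughly what the kernel's __printf(f, a) expands to. */
#define __printf(f, a)	__attribute__((format(printf, f, a)))

extern __printf(2, 3)
int foo_log(int level, const char *fmt, ...);

/* With the annotation, foo_log(1, "%s", 42) now triggers a -Wformat
 * warning instead of silently mis-reading its argument. */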
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ab25814690bc..a76c9172b2eb 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
424 424
425asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 425asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
426 426
427extern int compat_printk(const char *fmt, ...); 427extern __printf(1, 2) int compat_printk(const char *fmt, ...);
428extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat); 428extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
429extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set); 429extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
430 430
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 7f8ad9593da7..e08a6ae7c0a4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -17,11 +17,11 @@
17# define __release(x) __context__(x,-1) 17# define __release(x) __context__(x,-1)
18# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) 18# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
19# define __percpu __attribute__((noderef, address_space(3))) 19# define __percpu __attribute__((noderef, address_space(3)))
20# define __pmem __attribute__((noderef, address_space(5)))
20#ifdef CONFIG_SPARSE_RCU_POINTER 21#ifdef CONFIG_SPARSE_RCU_POINTER
21# define __rcu __attribute__((noderef, address_space(4))) 22# define __rcu __attribute__((noderef, address_space(4)))
22#else 23#else
23# define __rcu 24# define __rcu
24# define __pmem __attribute__((noderef, address_space(5)))
25#endif 25#endif
26extern void __chk_user_ptr(const volatile void __user *); 26extern void __chk_user_ptr(const volatile void __user *);
27extern void __chk_io_ptr(const volatile void __iomem *); 27extern void __chk_io_ptr(const volatile void __iomem *);
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index c9e5c57e4edf..63a36e89d0eb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,7 +64,8 @@ struct config_item {
64 struct dentry *ci_dentry; 64 struct dentry *ci_dentry;
65}; 65};
66 66
67extern int config_item_set_name(struct config_item *, const char *, ...); 67extern __printf(2, 3)
68int config_item_set_name(struct config_item *, const char *, ...);
68 69
69static inline char *config_item_name(struct config_item * item) 70static inline char *config_item_name(struct config_item * item)
70{ 71{
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 76abba4b238e..dcacb1a72e26 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
340 __u64 mm_reg_addr; 340 __u64 mm_reg_addr;
341}; 341};
342 342
343/* Memory Error Section */ 343/* Old Memory Error Section UEFI 2.1, 2.2 */
344struct cper_sec_mem_err_old {
345 __u64 validation_bits;
346 __u64 error_status;
347 __u64 physical_addr;
348 __u64 physical_addr_mask;
349 __u16 node;
350 __u16 card;
351 __u16 module;
352 __u16 bank;
353 __u16 device;
354 __u16 row;
355 __u16 column;
356 __u16 bit_pos;
357 __u64 requestor_id;
358 __u64 responder_id;
359 __u64 target_id;
360 __u8 error_type;
361};
362
363/* Memory Error Section UEFI >= 2.3 */
344struct cper_sec_mem_err { 364struct cper_sec_mem_err {
345 __u64 validation_bits; 365 __u64 validation_bits;
346 __u64 error_status; 366 __u64 error_status;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c0fb6b1b4712..23c30bdcca86 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
40extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 40extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
42 42
43extern struct device *cpu_device_create(struct device *parent, void *drvdata, 43extern __printf(4, 5)
44 const struct attribute_group **groups, 44struct device *cpu_device_create(struct device *parent, void *drvdata,
45 const char *fmt, ...); 45 const struct attribute_group **groups,
46 const char *fmt, ...);
46#ifdef CONFIG_HOTPLUG_CPU 47#ifdef CONFIG_HOTPLUG_CPU
47extern void unregister_cpu(struct cpu *cpu); 48extern void unregister_cpu(struct cpu *cpu);
48extern ssize_t arch_cpu_probe(const char *, size_t); 49extern ssize_t arch_cpu_probe(const char *, size_t);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 29ad97c34fd5..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,6 +62,7 @@ struct cpufreq_policy {
62 /* CPUs sharing clock, require sw coordination */ 62 /* CPUs sharing clock, require sw coordination */
63 cpumask_var_t cpus; /* Online CPUs only */ 63 cpumask_var_t cpus; /* Online CPUs only */
64 cpumask_var_t related_cpus; /* Online + Offline CPUs */ 64 cpumask_var_t related_cpus; /* Online + Offline CPUs */
65 cpumask_var_t real_cpus; /* Related and present */
65 66
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 67 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 68 should set cpufreq */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d2d50249b7b2..d67ae119cf4e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -327,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry)
327/* 327/*
328 * helper function for dentry_operations.d_dname() members 328 * helper function for dentry_operations.d_dname() members
329 */ 329 */
330extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 330extern __printf(4, 5)
331char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
331extern char *simple_dname(struct dentry *, char *, int); 332extern char *simple_dname(struct dentry *, char *, int);
332 333
333extern char *__d_path(const struct path *, const struct path *, char *, int); 334extern char *__d_path(const struct path *, const struct path *, char *, int);
diff --git a/include/linux/device.h b/include/linux/device.h
index 5a31bf3a4024..a2b4ea70a946 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -637,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id);
637 637
638/* managed devm_k.alloc/kfree for device drivers */ 638/* managed devm_k.alloc/kfree for device drivers */
639extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 639extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
640extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 640extern __printf(3, 0)
641 va_list ap); 641char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
642 va_list ap);
642extern __printf(3, 4) 643extern __printf(3, 4)
643char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...); 644char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
644static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 645static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -1011,12 +1012,10 @@ extern int __must_check device_reprobe(struct device *dev);
1011/* 1012/*
1012 * Easy functions for dynamically creating devices on the fly 1013 * Easy functions for dynamically creating devices on the fly
1013 */ 1014 */
1014extern struct device *device_create_vargs(struct class *cls, 1015extern __printf(5, 0)
1015 struct device *parent, 1016struct device *device_create_vargs(struct class *cls, struct device *parent,
1016 dev_t devt, 1017 dev_t devt, void *drvdata,
1017 void *drvdata, 1018 const char *fmt, va_list vargs);
1018 const char *fmt,
1019 va_list vargs);
1020extern __printf(5, 6) 1019extern __printf(5, 6)
1021struct device *device_create(struct class *cls, struct device *parent, 1020struct device *device_create(struct class *cls, struct device *parent,
1022 dev_t devt, void *drvdata, 1021 dev_t devt, void *drvdata,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a0653e560c26..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
55 55
56extern void __init inode_init(void); 56extern void __init inode_init(void);
57extern void __init inode_init_early(void); 57extern void __init inode_init_early(void);
58extern void __init files_init(unsigned long); 58extern void __init files_init(void);
59extern void __init files_maxfiles_init(void);
59 60
60extern struct files_stat_struct files_stat; 61extern struct files_stat_struct files_stat;
61extern unsigned long get_max_files(void); 62extern unsigned long get_max_files(void);
@@ -1046,12 +1047,12 @@ extern void locks_remove_file(struct file *);
1046extern void locks_release_private(struct file_lock *); 1047extern void locks_release_private(struct file_lock *);
1047extern void posix_test_lock(struct file *, struct file_lock *); 1048extern void posix_test_lock(struct file *, struct file_lock *);
1048extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); 1049extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
1049extern int posix_lock_file_wait(struct file *, struct file_lock *); 1050extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
1050extern int posix_unblock_lock(struct file_lock *); 1051extern int posix_unblock_lock(struct file_lock *);
1051extern int vfs_test_lock(struct file *, struct file_lock *); 1052extern int vfs_test_lock(struct file *, struct file_lock *);
1052extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); 1053extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
1053extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); 1054extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
1054extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); 1055extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
1055extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); 1056extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
1056extern void lease_get_mtime(struct inode *, struct timespec *time); 1057extern void lease_get_mtime(struct inode *, struct timespec *time);
1057extern int generic_setlease(struct file *, long, struct file_lock **, void **priv); 1058extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1137,7 +1138,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
1137 return -ENOLCK; 1138 return -ENOLCK;
1138} 1139}
1139 1140
1140static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl) 1141static inline int posix_lock_inode_wait(struct inode *inode,
1142 struct file_lock *fl)
1141{ 1143{
1142 return -ENOLCK; 1144 return -ENOLCK;
1143} 1145}
@@ -1163,8 +1165,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
1163 return 0; 1165 return 0;
1164} 1166}
1165 1167
1166static inline int flock_lock_file_wait(struct file *filp, 1168static inline int flock_lock_inode_wait(struct inode *inode,
1167 struct file_lock *request) 1169 struct file_lock *request)
1168{ 1170{
1169 return -ENOLCK; 1171 return -ENOLCK;
1170} 1172}
@@ -1202,6 +1204,20 @@ static inline void show_fd_locks(struct seq_file *f,
1202 struct file *filp, struct files_struct *files) {} 1204 struct file *filp, struct files_struct *files) {}
1203#endif /* !CONFIG_FILE_LOCKING */ 1205#endif /* !CONFIG_FILE_LOCKING */
1204 1206
1207static inline struct inode *file_inode(const struct file *f)
1208{
1209 return f->f_inode;
1210}
1211
1212static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1213{
1214 return posix_lock_inode_wait(file_inode(filp), fl);
1215}
1216
1217static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1218{
1219 return flock_lock_inode_wait(file_inode(filp), fl);
1220}
1205 1221
1206struct fasync_struct { 1222struct fasync_struct {
1207 spinlock_t fa_lock; 1223 spinlock_t fa_lock;
@@ -2011,11 +2027,6 @@ extern void ihold(struct inode * inode);
2011extern void iput(struct inode *); 2027extern void iput(struct inode *);
2012extern int generic_update_time(struct inode *, struct timespec *, int); 2028extern int generic_update_time(struct inode *, struct timespec *, int);
2013 2029
2014static inline struct inode *file_inode(const struct file *f)
2015{
2016 return f->f_inode;
2017}
2018
2019/* /sys/fs */ 2030/* /sys/fs */
2020extern struct kobject *fs_kobj; 2031extern struct kobject *fs_kobj;
2021 2032
@@ -2235,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
2235 2246
2236/* fs/dcache.c */ 2247/* fs/dcache.c */
2237extern void __init vfs_caches_init_early(void); 2248extern void __init vfs_caches_init_early(void);
2238extern void __init vfs_caches_init(unsigned long); 2249extern void __init vfs_caches_init(void);
2239 2250
2240extern struct kmem_cache *names_cachep; 2251extern struct kmem_cache *names_cachep;
2241 2252
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf9..6cd8c0ee4b6f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
116 * SAVE_REGS. If another ops with this flag set is already registered 116 * SAVE_REGS. If another ops with this flag set is already registered
117 * for any of the functions that this ops will be registered for, then 117 * for any of the functions that this ops will be registered for, then
118 * this ops will fail to register or set_filter_ip. 118 * this ops will fail to register or set_filter_ip.
119 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
119 */ 120 */
120enum { 121enum {
121 FTRACE_OPS_FL_ENABLED = 1 << 0, 122 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -132,6 +133,7 @@ enum {
132 FTRACE_OPS_FL_MODIFYING = 1 << 11, 133 FTRACE_OPS_FL_MODIFYING = 1 << 11,
133 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 134 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
134 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 135 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
136 FTRACE_OPS_FL_PID = 1 << 14,
135}; 137};
136 138
137#ifdef CONFIG_DYNAMIC_FTRACE 139#ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
159 struct ftrace_ops *next; 161 struct ftrace_ops *next;
160 unsigned long flags; 162 unsigned long flags;
161 void *private; 163 void *private;
164 ftrace_func_t saved_func;
162 int __percpu *disabled; 165 int __percpu *disabled;
163#ifdef CONFIG_DYNAMIC_FTRACE 166#ifdef CONFIG_DYNAMIC_FTRACE
164 int nr_trampolines; 167 int nr_trampolines;
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index cc7ec129b329..c8393cd4d44f 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -45,7 +45,7 @@ struct seq_file;
45 * @base: identifies the first GPIO number handled by this chip; 45 * @base: identifies the first GPIO number handled by this chip;
46 * or, if negative during registration, requests dynamic ID allocation. 46 * or, if negative during registration, requests dynamic ID allocation.
47 * DEPRECATION: providing anything non-negative and nailing the base 47 * DEPRECATION: providing anything non-negative and nailing the base
48 * base offset of GPIO chips is deprecated. Please pass -1 as base to 48 * offset of GPIO chips is deprecated. Please pass -1 as base to
49 * let gpiolib select the chip base in all possible cases. We want to 49 * let gpiolib select the chip base in all possible cases. We want to
50 * get rid of the static GPIO number space in the long run. 50 * get rid of the static GPIO number space in the long run.
51 * @ngpio: the number of GPIOs handled by this controller; the last GPIO 51 * @ngpio: the number of GPIOs handled by this controller; the last GPIO
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 0042bf330b99..c02b5ce6c5cd 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -230,6 +230,7 @@ struct hid_sensor_common {
230 struct platform_device *pdev; 230 struct platform_device *pdev;
231 unsigned usage_id; 231 unsigned usage_id;
232 atomic_t data_ready; 232 atomic_t data_ready;
233 atomic_t user_requested_state;
233 struct iio_trigger *trigger; 234 struct iio_trigger *trigger;
234 struct hid_sensor_hub_attribute_info poll; 235 struct hid_sensor_hub_attribute_info poll;
235 struct hid_sensor_hub_attribute_info report_state; 236 struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 205026175c42..d891f949466a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
460 return &mm->page_table_lock; 460 return &mm->page_table_lock;
461} 461}
462 462
463static inline bool hugepages_supported(void) 463#ifndef hugepages_supported
464{ 464/*
 465 /* 465 * Some platforms decide whether they support huge pages at boot
 466 * Some platforms decide whether they support huge pages at boot 466 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
467 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when 467 * when there is no such support
468 * there is no such support 468 */
469 */ 469#define hugepages_supported() (HPAGE_SHIFT != 0)
470 return HPAGE_SHIFT != 0; 470#endif
471}
472 471
473#else /* CONFIG_HUGETLB_PAGE */ 472#else /* CONFIG_HUGETLB_PAGE */
474struct hstate {}; 473struct hstate {};
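
Turning hugepages_supported() into an overridable macro keeps the HPAGE_SHIFT != 0 fallback while letting an architecture that detects huge page support at runtime supply its own test. A hypothetical arch override (the symbol name is made up) would sit in that arch's asm headers so it is seen before linux/hugetlb.h:

/* Hypothetical override in an arch's asm/page.h (or similar). */
extern bool foo_cpu_has_hugepage_facility;
#define hugepages_supported()	(foo_cpu_has_hugepage_facility)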
diff --git a/include/linux/init.h b/include/linux/init.h
index 7c68c36d3fd8..b449f378f995 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -282,68 +282,8 @@ void __init parse_early_param(void);
282void __init parse_early_options(char *cmdline); 282void __init parse_early_options(char *cmdline);
283#endif /* __ASSEMBLY__ */ 283#endif /* __ASSEMBLY__ */
284 284
285/**
286 * module_init() - driver initialization entry point
287 * @x: function to be run at kernel boot time or module insertion
288 *
289 * module_init() will either be called during do_initcalls() (if
290 * builtin) or at module insertion time (if a module). There can only
291 * be one per module.
292 */
293#define module_init(x) __initcall(x);
294
295/**
296 * module_exit() - driver exit entry point
297 * @x: function to be run when driver is removed
298 *
299 * module_exit() will wrap the driver clean-up code
300 * with cleanup_module() when used with rmmod when
301 * the driver is a module. If the driver is statically
302 * compiled into the kernel, module_exit() has no effect.
303 * There can only be one per module.
304 */
305#define module_exit(x) __exitcall(x);
306
307#else /* MODULE */ 285#else /* MODULE */
308 286
309/*
310 * In most cases loadable modules do not need custom
311 * initcall levels. There are still some valid cases where
312 * a driver may be needed early if built in, and does not
313 * matter when built as a loadable module. Like bus
314 * snooping debug drivers.
315 */
316#define early_initcall(fn) module_init(fn)
317#define core_initcall(fn) module_init(fn)
318#define core_initcall_sync(fn) module_init(fn)
319#define postcore_initcall(fn) module_init(fn)
320#define postcore_initcall_sync(fn) module_init(fn)
321#define arch_initcall(fn) module_init(fn)
322#define subsys_initcall(fn) module_init(fn)
323#define subsys_initcall_sync(fn) module_init(fn)
324#define fs_initcall(fn) module_init(fn)
325#define fs_initcall_sync(fn) module_init(fn)
326#define rootfs_initcall(fn) module_init(fn)
327#define device_initcall(fn) module_init(fn)
328#define device_initcall_sync(fn) module_init(fn)
329#define late_initcall(fn) module_init(fn)
330#define late_initcall_sync(fn) module_init(fn)
331
332#define console_initcall(fn) module_init(fn)
333#define security_initcall(fn) module_init(fn)
334
335/* Each module must use one module_init(). */
336#define module_init(initfn) \
337 static inline initcall_t __inittest(void) \
338 { return initfn; } \
339 int init_module(void) __attribute__((alias(#initfn)));
340
341/* This is only required if you want to be unloadable. */
342#define module_exit(exitfn) \
343 static inline exitcall_t __exittest(void) \
344 { return exitfn; } \
345 void cleanup_module(void) __attribute__((alias(#exitfn)));
346
347#define __setup_param(str, unique_id, fn) /* nothing */ 287#define __setup_param(str, unique_id, fn) /* nothing */
348#define __setup(str, func) /* nothing */ 288#define __setup(str, func) /* nothing */
349#endif 289#endif
@@ -351,24 +291,6 @@ void __init parse_early_options(char *cmdline);
351/* Data marked not to be saved by software suspend */ 291/* Data marked not to be saved by software suspend */
352#define __nosavedata __section(.data..nosave) 292#define __nosavedata __section(.data..nosave)
353 293
354/* This means "can be init if no module support, otherwise module load
355 may call it." */
356#ifdef CONFIG_MODULES
357#define __init_or_module
358#define __initdata_or_module
359#define __initconst_or_module
360#define __INIT_OR_MODULE .text
361#define __INITDATA_OR_MODULE .data
362#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
363#else
364#define __init_or_module __init
365#define __initdata_or_module __initdata
366#define __initconst_or_module __initconst
367#define __INIT_OR_MODULE __INIT
368#define __INITDATA_OR_MODULE __INITDATA
369#define __INITRODATA_OR_MODULE __INITRODATA
370#endif /*CONFIG_MODULES*/
371
372#ifdef MODULE 294#ifdef MODULE
373#define __exit_p(x) x 295#define __exit_p(x) x
374#else 296#else
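
The module_init()/module_exit() definitions removed from init.h here reappear in linux/module.h further down, and module.h now includes linux/init.h itself, so a minimal module still builds with the usual pattern. A small sketch (the foo driver is hypothetical):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init foo_init(void)
{
	pr_info("foo: loaded\n");
	return 0;
}

static void __exit foo_exit(void)
{
	pr_info("foo: unloaded\n");
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");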
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index dc767f7c3704..f9c1b6d0f2e4 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -258,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
258 void *data); 258 void *data);
259struct device *iommu_device_create(struct device *parent, void *drvdata, 259struct device *iommu_device_create(struct device *parent, void *drvdata,
260 const struct attribute_group **groups, 260 const struct attribute_group **groups,
261 const char *fmt, ...); 261 const char *fmt, ...) __printf(4, 5);
262void iommu_device_destroy(struct device *dev); 262void iommu_device_destroy(struct device *dev);
263int iommu_device_link(struct device *dev, struct device *link); 263int iommu_device_link(struct device *dev, struct device *link);
264void iommu_device_unlink(struct device *dev, struct device *link); 264void iommu_device_unlink(struct device *dev, struct device *link);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 92188b0225bb..51744bcf74ee 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
484extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); 484extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
485extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, 485extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
486 void *vcpu_info); 486 void *vcpu_info);
487extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
487#endif 488#endif
488 489
489/* Handling of unhandled and spurious interrupts: */ 490/* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 624a668e61f1..fcea4e48e21f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -87,7 +87,12 @@ struct irq_desc {
87 const char *name; 87 const char *name;
88} ____cacheline_internodealigned_in_smp; 88} ____cacheline_internodealigned_in_smp;
89 89
90#ifndef CONFIG_SPARSE_IRQ 90#ifdef CONFIG_SPARSE_IRQ
91extern void irq_lock_sparse(void);
92extern void irq_unlock_sparse(void);
93#else
94static inline void irq_lock_sparse(void) { }
95static inline void irq_unlock_sparse(void) { }
91extern struct irq_desc irq_desc[NR_IRQS]; 96extern struct irq_desc irq_desc[NR_IRQS];
92#endif 97#endif
93 98
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5f0be58640ea..5582410727cb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -411,7 +411,8 @@ extern __printf(3, 0)
411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); 411int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
412extern __printf(2, 3) 412extern __printf(2, 3)
413char *kasprintf(gfp_t gfp, const char *fmt, ...); 413char *kasprintf(gfp_t gfp, const char *fmt, ...);
414extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); 414extern __printf(2, 0)
415char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
415 416
416extern __scanf(2, 3) 417extern __scanf(2, 3)
417int sscanf(const char *, const char *, ...); 418int sscanf(const char *, const char *, ...);
@@ -679,10 +680,10 @@ do { \
679 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ 680 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
680} while (0) 681} while (0)
681 682
682extern int 683extern __printf(2, 0) int
683__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); 684__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
684 685
685extern int 686extern __printf(2, 0) int
686__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); 687__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
687 688
688extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); 689extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -702,7 +703,7 @@ int trace_printk(const char *fmt, ...)
702{ 703{
703 return 0; 704 return 0;
704} 705}
705static inline int 706static __printf(1, 0) inline int
706ftrace_vprintk(const char *fmt, va_list ap) 707ftrace_vprintk(const char *fmt, va_list ap)
707{ 708{
708 return 0; 709 return 0;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 2d61b909f414..637f67002c5a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -80,8 +80,9 @@ struct kobject {
80 80
81extern __printf(2, 3) 81extern __printf(2, 3)
82int kobject_set_name(struct kobject *kobj, const char *name, ...); 82int kobject_set_name(struct kobject *kobj, const char *name, ...);
83extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, 83extern __printf(2, 0)
84 va_list vargs); 84int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
85 va_list vargs);
85 86
86static inline const char *kobject_name(const struct kobject *kobj) 87static inline const char *kobject_name(const struct kobject *kobj)
87{ 88{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9564fd78c547..05e99b8ef465 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
734 return false; 734 return false;
735} 735}
736#endif 736#endif
737#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
738void kvm_arch_start_assignment(struct kvm *kvm);
739void kvm_arch_end_assignment(struct kvm *kvm);
740bool kvm_arch_has_assigned_device(struct kvm *kvm);
741#else
742static inline void kvm_arch_start_assignment(struct kvm *kvm)
743{
744}
745
746static inline void kvm_arch_end_assignment(struct kvm *kvm)
747{
748}
749
750static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
751{
752 return false;
753}
754#endif
737 755
738static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) 756static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
739{ 757{
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 36ce37bcc963..c9cfbcdb8d14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -431,6 +431,8 @@ enum {
431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ 433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
434 ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
435 ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
434 436
435 /* DMA mask for user DMA control: User visible values; DO NOT 437 /* DMA mask for user DMA control: User visible values; DO NOT
436 renumber */ 438 renumber */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f92dbac..bf6f117fcf4d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1003,6 +1003,34 @@ static inline int page_mapped(struct page *page)
1003} 1003}
1004 1004
1005/* 1005/*
1006 * Return true only if the page has been allocated with
1007 * ALLOC_NO_WATERMARKS and the low watermark was not
1008 * met implying that the system is under some pressure.
1009 */
1010static inline bool page_is_pfmemalloc(struct page *page)
1011{
1012 /*
1013 * Page index cannot be this large so this must be
1014 * a pfmemalloc page.
1015 */
1016 return page->index == -1UL;
1017}
1018
1019/*
1020 * Only to be called by the page allocator on a freshly allocated
1021 * page.
1022 */
1023static inline void set_page_pfmemalloc(struct page *page)
1024{
1025 page->index = -1UL;
1026}
1027
1028static inline void clear_page_pfmemalloc(struct page *page)
1029{
1030 page->index = 0;
1031}
1032
1033/*
1006 * Different kinds of faults, as returned by handle_mm_fault(). 1034 * Different kinds of faults, as returned by handle_mm_fault().
1007 * Used to decide whether a process gets delivered SIGBUS or 1035 * Used to decide whether a process gets delivered SIGBUS or
1008 * just gets major/minor fault counters bumped up. 1036 * just gets major/minor fault counters bumped up.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0038ac7466fd..15549578d559 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -63,15 +63,6 @@ struct page {
63 union { 63 union {
64 pgoff_t index; /* Our offset within mapping. */ 64 pgoff_t index; /* Our offset within mapping. */
65 void *freelist; /* sl[aou]b first free object */ 65 void *freelist; /* sl[aou]b first free object */
66 bool pfmemalloc; /* If set by the page allocator,
67 * ALLOC_NO_WATERMARKS was set
68 * and the low watermark was not
69 * met implying that the system
70 * is under some pressure. The
71 * caller should try ensure
72 * this page is only used to
73 * free other pages.
74 */
75 }; 66 };
76 67
77 union { 68 union {
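
With the pfmemalloc bool gone from struct page, the state is encoded in page->index by the page allocator and read back through page_is_pfmemalloc(). A hedged sketch of a consumer, mirroring how the network stack is expected to use the helper (foo_attach_page() is illustrative, not from this patch):

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Taint an skb when it is backed by an emergency (pfmemalloc) page,
 * so the memory is only used to make forward progress on reclaim. */
static void foo_attach_page(struct sk_buff *skb, struct page *page)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}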
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index c5d52780d6a0..3ba327af055c 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
106extern void disable_mmiotrace(void); 106extern void disable_mmiotrace(void);
107extern void mmio_trace_rw(struct mmiotrace_rw *rw); 107extern void mmio_trace_rw(struct mmiotrace_rw *rw);
108extern void mmio_trace_mapping(struct mmiotrace_map *map); 108extern void mmio_trace_mapping(struct mmiotrace_map *map);
109extern int mmio_trace_printk(const char *fmt, va_list args); 109extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
110 110
111#endif /* _LINUX_MMIOTRACE_H */ 111#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8183d6640ca7..34f25b7bf642 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -189,6 +189,8 @@ struct css_device_id {
189struct acpi_device_id { 189struct acpi_device_id {
190 __u8 id[ACPI_ID_LEN]; 190 __u8 id[ACPI_ID_LEN];
191 kernel_ulong_t driver_data; 191 kernel_ulong_t driver_data;
192 __u32 cls;
193 __u32 cls_msk;
192}; 194};
193 195
194#define PNP_ID_LEN 8 196#define PNP_ID_LEN 8
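The new cls/cls_msk fields let an acpi_device_id match a device class under a mask. A standalone sketch of how such a masked compare typically works (assumed semantics and made-up values, not the kernel's matcher):

/* masked class match; sketch only */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct acpi_id_sketch {
    uint32_t cls;       /* expected class bits */
    uint32_t cls_msk;   /* which bits of the class code are significant */
};

static bool class_matches(const struct acpi_id_sketch *id, uint32_t dev_cls)
{
    return (dev_cls & id->cls_msk) == (id->cls & id->cls_msk);
}

int main(void)
{
    /* hypothetical: match any device whose class code starts with 0x01 */
    struct acpi_id_sketch id = { .cls = 0x010000, .cls_msk = 0xff0000 };

    printf("%d %d\n", class_matches(&id, 0x010601), class_matches(&id, 0x0c0330));
    return 0;
}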
diff --git a/include/linux/module.h b/include/linux/module.h
index d67b1932cc59..3a19c79918e0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,6 +11,7 @@
11#include <linux/compiler.h> 11#include <linux/compiler.h>
12#include <linux/cache.h> 12#include <linux/cache.h>
13#include <linux/kmod.h> 13#include <linux/kmod.h>
14#include <linux/init.h>
14#include <linux/elf.h> 15#include <linux/elf.h>
15#include <linux/stringify.h> 16#include <linux/stringify.h>
16#include <linux/kobject.h> 17#include <linux/kobject.h>
@@ -71,6 +72,89 @@ extern struct module_attribute module_uevent;
71extern int init_module(void); 72extern int init_module(void);
72extern void cleanup_module(void); 73extern void cleanup_module(void);
73 74
75#ifndef MODULE
76/**
77 * module_init() - driver initialization entry point
78 * @x: function to be run at kernel boot time or module insertion
79 *
80 * module_init() will either be called during do_initcalls() (if
81 * builtin) or at module insertion time (if a module). There can only
82 * be one per module.
83 */
84#define module_init(x) __initcall(x);
85
86/**
87 * module_exit() - driver exit entry point
88 * @x: function to be run when driver is removed
89 *
90 * module_exit() will wrap the driver clean-up code
91 * with cleanup_module() when used with rmmod when
92 * the driver is a module. If the driver is statically
93 * compiled into the kernel, module_exit() has no effect.
94 * There can only be one per module.
95 */
96#define module_exit(x) __exitcall(x);
97
98#else /* MODULE */
99
100/*
101 * In most cases loadable modules do not need custom
102 * initcall levels. There are still some valid cases where
103 * a driver may be needed early if built in, and does not
104 * matter when built as a loadable module. Like bus
105 * snooping debug drivers.
106 */
107#define early_initcall(fn) module_init(fn)
108#define core_initcall(fn) module_init(fn)
109#define core_initcall_sync(fn) module_init(fn)
110#define postcore_initcall(fn) module_init(fn)
111#define postcore_initcall_sync(fn) module_init(fn)
112#define arch_initcall(fn) module_init(fn)
113#define subsys_initcall(fn) module_init(fn)
114#define subsys_initcall_sync(fn) module_init(fn)
115#define fs_initcall(fn) module_init(fn)
116#define fs_initcall_sync(fn) module_init(fn)
117#define rootfs_initcall(fn) module_init(fn)
118#define device_initcall(fn) module_init(fn)
119#define device_initcall_sync(fn) module_init(fn)
120#define late_initcall(fn) module_init(fn)
121#define late_initcall_sync(fn) module_init(fn)
122
123#define console_initcall(fn) module_init(fn)
124#define security_initcall(fn) module_init(fn)
125
126/* Each module must use one module_init(). */
127#define module_init(initfn) \
128 static inline initcall_t __inittest(void) \
129 { return initfn; } \
130 int init_module(void) __attribute__((alias(#initfn)));
131
132/* This is only required if you want to be unloadable. */
133#define module_exit(exitfn) \
134 static inline exitcall_t __exittest(void) \
135 { return exitfn; } \
136 void cleanup_module(void) __attribute__((alias(#exitfn)));
137
138#endif
139
140/* This means "can be init if no module support, otherwise module load
141 may call it." */
142#ifdef CONFIG_MODULES
143#define __init_or_module
144#define __initdata_or_module
145#define __initconst_or_module
146#define __INIT_OR_MODULE .text
147#define __INITDATA_OR_MODULE .data
148#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
149#else
150#define __init_or_module __init
151#define __initdata_or_module __initdata
152#define __initconst_or_module __initconst
153#define __INIT_OR_MODULE __INIT
154#define __INITDATA_OR_MODULE __INITDATA
155#define __INITRODATA_OR_MODULE __INITRODATA
156#endif /*CONFIG_MODULES*/
157
74/* Archs provide a method of finding the correct exception table. */ 158/* Archs provide a method of finding the correct exception table. */
75struct exception_table_entry; 159struct exception_table_entry;
76 160
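In the MODULE branch above, module_init(x) makes the generic entry point init_module an alias of the driver's own function, so the loader always finds one well-known symbol. A standalone sketch of that alias mechanism using the same GCC/Clang attribute (not an actual loadable module):

/* symbol aliasing as used by module_init(); sketch only */
#include <stdio.h>

static int my_driver_init(void)
{
    puts("driver init");
    return 0;
}

/* comparable to: int init_module(void) __attribute__((alias("my_driver_init"))); */
int entry_point(void) __attribute__((alias("my_driver_init")));

int main(void)
{
    return entry_point();
}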
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index f25e2bdd188c..272f42952f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -178,17 +178,17 @@ typedef enum {
178/* Chip may not exist, so silence any errors in scan */ 178/* Chip may not exist, so silence any errors in scan */
179#define NAND_SCAN_SILENT_NODEV 0x00040000 179#define NAND_SCAN_SILENT_NODEV 0x00040000
180/* 180/*
181 * This option could be defined by controller drivers to protect against
182 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
183 */
184#define NAND_USE_BOUNCE_BUFFER 0x00080000
185/*
186 * Autodetect nand buswidth with readid/onfi. 181 * Autodetect nand buswidth with readid/onfi.
187 * This suppose the driver will configure the hardware in 8 bits mode 182 * This suppose the driver will configure the hardware in 8 bits mode
188 * when calling nand_scan_ident, and update its configuration 183 * when calling nand_scan_ident, and update its configuration
189 * before calling nand_scan_tail. 184 * before calling nand_scan_tail.
190 */ 185 */
191#define NAND_BUSWIDTH_AUTO 0x00080000 186#define NAND_BUSWIDTH_AUTO 0x00080000
187/*
188 * This option could be defined by controller drivers to protect against
189 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
190 */
191#define NAND_USE_BOUNCE_BUFFER 0x00100000
192 192
193/* Options set by nand scan */ 193/* Options set by nand scan */
194/* Nand scan has allocated controller struct */ 194/* Nand scan has allocated controller struct */
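The renumbering above matters because NAND_USE_BOUNCE_BUFFER previously shared the 0x00080000 bit with NAND_BUSWIDTH_AUTO, so testing one flag also matched the other. A tiny standalone sketch of why two flags on the same bit cannot be distinguished:

/* flag collision demo; values mirror the hunk above */
#include <stdio.h>

#define BUSWIDTH_AUTO          0x00080000
#define USE_BOUNCE_BUFFER_OLD  0x00080000   /* collided with BUSWIDTH_AUTO */
#define USE_BOUNCE_BUFFER_NEW  0x00100000   /* distinct bit after the fix */

int main(void)
{
    unsigned int options = BUSWIDTH_AUTO;

    /* false positive with the old value, correct result with the new one */
    printf("old: %d, new: %d\n",
           !!(options & USE_BOUNCE_BUFFER_OLD),
           !!(options & USE_BOUNCE_BUFFER_NEW));
    return 0;
}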
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f91b5ade30c9..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
292 struct nfs_inode *nfsi = NFS_I(inode); 292 struct nfs_inode *nfsi = NFS_I(inode);
293 293
294 spin_lock(&inode->i_lock); 294 spin_lock(&inode->i_lock);
295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
296 NFS_INO_REVAL_PAGECACHE |
297 NFS_INO_INVALID_ACCESS |
298 NFS_INO_INVALID_ACL;
296 if (S_ISDIR(inode->i_mode)) 299 if (S_ISDIR(inode->i_mode))
297 nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; 300 nfsi->cache_validity |= NFS_INO_INVALID_DATA;
298 spin_unlock(&inode->i_lock); 301 spin_unlock(&inode->i_lock);
299} 302}
300 303
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a2ea1491d3df..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
220#define NFS_CAP_SYMLINKS (1U << 2) 220#define NFS_CAP_SYMLINKS (1U << 2)
221#define NFS_CAP_ACLS (1U << 3) 221#define NFS_CAP_ACLS (1U << 3)
222#define NFS_CAP_ATOMIC_OPEN (1U << 4) 222#define NFS_CAP_ATOMIC_OPEN (1U << 4)
223#define NFS_CAP_CHANGE_ATTR (1U << 5) 223/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
224#define NFS_CAP_FILEID (1U << 6) 224#define NFS_CAP_FILEID (1U << 6)
225#define NFS_CAP_MODE (1U << 7) 225#define NFS_CAP_MODE (1U << 7)
226#define NFS_CAP_NLINK (1U << 8) 226#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 4c508549833a..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
59#else /* CONFIG_OF */ 59#else /* CONFIG_OF */
60 60
61static inline int of_driver_match_device(struct device *dev, 61static inline int of_driver_match_device(struct device *dev,
62 struct device_driver *drv) 62 const struct device_driver *drv)
63{ 63{
64 return 0; 64 return 0;
65} 65}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
631 1 << PG_private | 1 << PG_private_2 | \ 631 1 << PG_private | 1 << PG_private_2 | \
632 1 << PG_writeback | 1 << PG_reserved | \ 632 1 << PG_writeback | 1 << PG_reserved | \
633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
634 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ 634 1 << PG_unevictable | __PG_MLOCKED | \
635 __PG_COMPOUND_LOCK) 635 __PG_COMPOUND_LOCK)
636 636
637/* 637/*
638 * Flags checked when a page is prepped for return by the page allocator. 638 * Flags checked when a page is prepped for return by the page allocator.
639 * Pages being prepped should not have any flags set. If they are set, 639 * Pages being prepped should not have these flags set. If they are set,
640 * there has been a kernel bug or struct page corruption. 640 * there has been a kernel bug or struct page corruption.
641 *
642 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
643 * alloc-free cycle to prevent from reusing the page.
641 */ 644 */
642#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 645#define PAGE_FLAGS_CHECK_AT_PREP \
646 (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
643 647
644#define PAGE_FLAGS_PRIVATE \ 648#define PAGE_FLAGS_PRIVATE \
645 (1 << PG_private | 1 << PG_private_2) 649 (1 << PG_private | 1 << PG_private_2)
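PAGE_FLAGS_CHECK_AT_PREP now keeps every page flag bit except __PG_HWPOISON, so the poison marker is preserved across an alloc-free cycle instead of tripping the prep-time check. A standalone sketch of the mask arithmetic (bit positions are invented for illustration):

/* all-bits mask with one bit carved out; sketch only */
#include <stdio.h>

#define NR_FLAGS      8
#define PG_HWPOISON   (1u << 5)     /* hypothetical bit position */

#define CHECK_AT_PREP (((1u << NR_FLAGS) - 1) & ~PG_HWPOISON)

int main(void)
{
    printf("mask = 0x%02x (bit 5 excluded)\n", CHECK_AT_PREP);
    return 0;
}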
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index b48c3471c254..cacaabea8a09 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
8extern void __reset_page_owner(struct page *page, unsigned int order); 8extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page, 9extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask); 10 unsigned int order, gfp_t gfp_mask);
11extern gfp_t __get_page_owner_gfp(struct page *page);
11 12
12static inline void reset_page_owner(struct page *page, unsigned int order) 13static inline void reset_page_owner(struct page *page, unsigned int order)
13{ 14{
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
25 26
26 __set_page_owner(page, order, gfp_mask); 27 __set_page_owner(page, order, gfp_mask);
27} 28}
29
30static inline gfp_t get_page_owner_gfp(struct page *page)
31{
32 if (likely(!page_owner_inited))
33 return 0;
34
35 return __get_page_owner_gfp(page);
36}
28#else 37#else
29static inline void reset_page_owner(struct page *page, unsigned int order) 38static inline void reset_page_owner(struct page *page, unsigned int order)
30{ 39{
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
33 unsigned int order, gfp_t gfp_mask) 42 unsigned int order, gfp_t gfp_mask)
34{ 43{
35} 44}
45static inline gfp_t get_page_owner_gfp(struct page *page)
46{
47 return 0;
48}
36 49
37#endif /* CONFIG_PAGE_OWNER */ 50#endif /* CONFIG_PAGE_OWNER */
38#endif /* __LINUX_PAGE_OWNER_H */ 51#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index 3cc21c9cc1e8..9fade5dd2e86 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller platform data header file 4 * Arasan Compact Flash host controller platform data header file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com> 7 * Viresh Kumar <vireshk@kernel.org>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
8#ifndef __MACB_PDATA_H__ 8#ifndef __MACB_PDATA_H__
9#define __MACB_PDATA_H__ 9#define __MACB_PDATA_H__
10 10
11/**
12 * struct macb_platform_data - platform data for MACB Ethernet
13 * @phy_mask: phy mask passed when registering the MDIO bus
14 * within the driver
15 * @phy_irq_pin: PHY IRQ
16 * @is_rmii: using RMII interface?
17 * @rev_eth_addr: reverse Ethernet address byte order
18 */
11struct macb_platform_data { 19struct macb_platform_data {
12 u32 phy_mask; 20 u32 phy_mask;
13 int phy_irq_pin; /* PHY IRQ */ 21 int phy_irq_pin;
14 u8 is_rmii; /* using RMII interface? */ 22 u8 is_rmii;
15 u8 rev_eth_addr; /* reverse Ethernet address byte order */ 23 u8 rev_eth_addr;
16}; 24};
17 25
18#endif /* __MACB_PDATA_H__ */ 26#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac137..e1571efa3f2b 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
43 enum wp_types wp_type; 43 enum wp_types wp_type;
44 enum cd_types cd_type; 44 enum cd_types cd_type;
45 int max_bus_width; 45 int max_bus_width;
46 unsigned int f_max;
47 bool support_vsel; 46 bool support_vsel;
48 unsigned int delay_line; 47 unsigned int delay_line;
49}; 48};
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 58b1fec40d37..a6298b27ac99 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -122,7 +122,7 @@ static inline __printf(1, 2) __cold
122void early_printk(const char *s, ...) { } 122void early_printk(const char *s, ...) { }
123#endif 123#endif
124 124
125typedef int(*printk_func_t)(const char *fmt, va_list args); 125typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
126 126
127#ifdef CONFIG_PRINTK 127#ifdef CONFIG_PRINTK
128asmlinkage __printf(5, 0) 128asmlinkage __printf(5, 0)
@@ -166,7 +166,7 @@ char *log_buf_addr_get(void);
166u32 log_buf_len_get(void); 166u32 log_buf_len_get(void);
167void log_buf_kexec_setup(void); 167void log_buf_kexec_setup(void);
168void __init setup_log_buf(int early); 168void __init setup_log_buf(int early);
169void dump_stack_set_arch_desc(const char *fmt, ...); 169__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
170void dump_stack_print_info(const char *log_lvl); 170void dump_stack_print_info(const char *log_lvl);
171void show_regs_print_info(const char *log_lvl); 171void show_regs_print_info(const char *log_lvl);
172#else 172#else
@@ -217,7 +217,7 @@ static inline void setup_log_buf(int early)
217{ 217{
218} 218}
219 219
220static inline void dump_stack_set_arch_desc(const char *fmt, ...) 220static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
221{ 221{
222} 222}
223 223
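The annotations added above use format(printf, 1, 0): the compiler still type-checks callers' format strings even though the variadic arguments arrive as a va_list (hence the 0 in place of a first-variadic-argument index). A standalone sketch of the attribute on a small logging pair:

/* format-string checking for a va_list forwarder; sketch only */
#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 1, 0)))
static int my_vlog(const char *fmt, va_list args)
{
    return vfprintf(stderr, fmt, args);
}

__attribute__((format(printf, 1, 2)))
static int my_log(const char *fmt, ...)
{
    va_list args;
    int ret;

    va_start(args, fmt);
    ret = my_vlog(fmt, args);
    va_end(args);
    return ret;
}

int main(void)
{
    return my_log("%s %d\n", "value:", 42) < 0;
}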
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea0f0b5..4a6759098769 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -50,6 +50,17 @@ struct reg_default {
50 unsigned int def; 50 unsigned int def;
51}; 51};
52 52
53/**
54 * Register/value pairs for sequences of writes
55 *
56 * @reg: Register address.
57 * @def: Register value.
58 */
59struct reg_sequence {
60 unsigned int reg;
61 unsigned int def;
62};
63
53#ifdef CONFIG_REGMAP 64#ifdef CONFIG_REGMAP
54 65
55enum regmap_endian { 66enum regmap_endian {
@@ -410,10 +421,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
410 const void *val, size_t val_len); 421 const void *val, size_t val_len);
411int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, 422int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
412 size_t val_count); 423 size_t val_count);
413int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs, 424int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
414 int num_regs); 425 int num_regs);
415int regmap_multi_reg_write_bypassed(struct regmap *map, 426int regmap_multi_reg_write_bypassed(struct regmap *map,
416 const struct reg_default *regs, 427 const struct reg_sequence *regs,
417 int num_regs); 428 int num_regs);
418int regmap_raw_write_async(struct regmap *map, unsigned int reg, 429int regmap_raw_write_async(struct regmap *map, unsigned int reg,
419 const void *val, size_t val_len); 430 const void *val, size_t val_len);
@@ -424,6 +435,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
424 size_t val_count); 435 size_t val_count);
425int regmap_update_bits(struct regmap *map, unsigned int reg, 436int regmap_update_bits(struct regmap *map, unsigned int reg,
426 unsigned int mask, unsigned int val); 437 unsigned int mask, unsigned int val);
438int regmap_write_bits(struct regmap *map, unsigned int reg,
439 unsigned int mask, unsigned int val);
427int regmap_update_bits_async(struct regmap *map, unsigned int reg, 440int regmap_update_bits_async(struct regmap *map, unsigned int reg,
428 unsigned int mask, unsigned int val); 441 unsigned int mask, unsigned int val);
429int regmap_update_bits_check(struct regmap *map, unsigned int reg, 442int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -450,7 +463,7 @@ void regcache_mark_dirty(struct regmap *map);
450bool regmap_check_range_table(struct regmap *map, unsigned int reg, 463bool regmap_check_range_table(struct regmap *map, unsigned int reg,
451 const struct regmap_access_table *table); 464 const struct regmap_access_table *table);
452 465
453int regmap_register_patch(struct regmap *map, const struct reg_default *regs, 466int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
454 int num_regs); 467 int num_regs);
455int regmap_parse_val(struct regmap *map, const void *buf, 468int regmap_parse_val(struct regmap *map, const void *buf,
456 unsigned int *val); 469 unsigned int *val);
@@ -503,6 +516,8 @@ int regmap_field_update_bits(struct regmap_field *field,
503 516
504int regmap_fields_write(struct regmap_field *field, unsigned int id, 517int regmap_fields_write(struct regmap_field *field, unsigned int id,
505 unsigned int val); 518 unsigned int val);
519int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
520 unsigned int val);
506int regmap_fields_read(struct regmap_field *field, unsigned int id, 521int regmap_fields_read(struct regmap_field *field, unsigned int id,
507 unsigned int *val); 522 unsigned int *val);
508int regmap_fields_update_bits(struct regmap_field *field, unsigned int id, 523int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
@@ -645,6 +660,13 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
645 return -EINVAL; 660 return -EINVAL;
646} 661}
647 662
663static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
664 unsigned int mask, unsigned int val)
665{
666 WARN_ONCE(1, "regmap API is disabled");
667 return -EINVAL;
668}
669
648static inline int regmap_update_bits_async(struct regmap *map, 670static inline int regmap_update_bits_async(struct regmap *map,
649 unsigned int reg, 671 unsigned int reg,
650 unsigned int mask, unsigned int val) 672 unsigned int mask, unsigned int val)
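A hedged usage sketch of the renamed reg_sequence type with regmap_multi_reg_write(), relying only on the declarations visible in this hunk; the register addresses and values are made up, and obtaining the regmap (and ARRAY_SIZE from linux/kernel.h) is left to the driver:

/* not a standalone program: a driver-side fragment for illustration */
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence codec_patch[] = {
    { .reg = 0x02, .def = 0x0001 },
    { .reg = 0x1c, .def = 0x00aa },
    { .reg = 0x1d, .def = 0x00aa },
};

static int apply_codec_patch(struct regmap *map)
{
    /* write the register/value pairs in order, as one sequence */
    return regmap_multi_reg_write(map, codec_patch, ARRAY_SIZE(codec_patch));
}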
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
index 2c92e1c8e055..aefd997262e4 100644
--- a/include/linux/rtc/sirfsoc_rtciobrg.h
+++ b/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -9,10 +9,14 @@
9#ifndef _SIRFSOC_RTC_IOBRG_H_ 9#ifndef _SIRFSOC_RTC_IOBRG_H_
10#define _SIRFSOC_RTC_IOBRG_H_ 10#define _SIRFSOC_RTC_IOBRG_H_
11 11
12struct regmap_config;
13
12extern void sirfsoc_rtc_iobrg_besyncing(void); 14extern void sirfsoc_rtc_iobrg_besyncing(void);
13 15
14extern u32 sirfsoc_rtc_iobrg_readl(u32 addr); 16extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
15 17
16extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr); 18extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
19struct regmap *devm_regmap_init_iobg(struct device *dev,
20 const struct regmap_config *config);
17 21
18#endif 22#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ae21f1591615..04b5ada460b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1522,8 +1522,6 @@ struct task_struct {
1522/* hung task detection */ 1522/* hung task detection */
1523 unsigned long last_switch_count; 1523 unsigned long last_switch_count;
1524#endif 1524#endif
1525/* CPU-specific state of this task */
1526 struct thread_struct thread;
1527/* filesystem information */ 1525/* filesystem information */
1528 struct fs_struct *fs; 1526 struct fs_struct *fs;
1529/* open file information */ 1527/* open file information */
@@ -1778,8 +1776,22 @@ struct task_struct {
1778 unsigned long task_state_change; 1776 unsigned long task_state_change;
1779#endif 1777#endif
1780 int pagefault_disabled; 1778 int pagefault_disabled;
1779/* CPU-specific state of this task */
1780 struct thread_struct thread;
1781/*
1782 * WARNING: on x86, 'thread_struct' contains a variable-sized
1783 * structure. It *MUST* be at the end of 'task_struct'.
1784 *
1785 * Do not put anything below here!
1786 */
1781}; 1787};
1782 1788
1789#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1790extern int arch_task_struct_size __read_mostly;
1791#else
1792# define arch_task_struct_size (sizeof(struct task_struct))
1793#endif
1794
1783/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1795/* Future-safe accessor for struct task_struct's cpus_allowed. */
1784#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) 1796#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1785 1797
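Moving thread_struct to the end of task_struct lets an architecture grow that trailing state and size allocations with arch_task_struct_size rather than sizeof(). A simplified standalone model of allocating a structure with an arch-chosen tail size (names are illustrative):

/* variable-sized trailing member; sketch only */
#include <stdio.h>
#include <stdlib.h>

struct thread_sketch { unsigned long regs[4]; };

struct task_sketch {
    int pid;
    /* must stay last: the tail may be larger than the declared type */
    struct thread_sketch thread;
};

/* what CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT lets an arch override */
static size_t arch_task_sketch_size = sizeof(struct task_sketch) + 64;

int main(void)
{
    struct task_sketch *t = calloc(1, arch_task_sketch_size);

    if (!t)
        return 1;
    t->pid = 1;
    printf("allocated %zu bytes for task %d\n", arch_task_sketch_size, t->pid);
    free(t);
    return 0;
}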
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..9b88536487e6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1602 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1603 1603
1604 /* 1604 /*
1605 * Propagate page->pfmemalloc to the skb if we can. The problem is 1605 * Propagate page pfmemalloc to the skb if we can. The problem is
1606 * that not all callers have unique ownership of the page. If 1606 * that not all callers have unique ownership of the page but rely
1607 * pfmemalloc is set, we check the mapping as a mapping implies 1607 * on page_is_pfmemalloc doing the right thing(tm).
1608 * page->index is set (index and pfmemalloc share space).
1609 * If it's a valid mapping, we cannot use page->pfmemalloc but we
1610 * do not lose pfmemalloc information as the pages would not be
1611 * allocated using __GFP_MEMALLOC.
1612 */ 1608 */
1613 frag->page.p = page; 1609 frag->page.p = page;
1614 frag->page_offset = off; 1610 frag->page_offset = off;
1615 skb_frag_size_set(frag, size); 1611 skb_frag_size_set(frag, size);
1616 1612
1617 page = compound_head(page); 1613 page = compound_head(page);
1618 if (page->pfmemalloc && !page->mapping) 1614 if (page_is_pfmemalloc(page))
1619 skb->pfmemalloc = true; 1615 skb->pfmemalloc = true;
1620} 1616}
1621 1617
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
2263static inline void skb_propagate_pfmemalloc(struct page *page, 2259static inline void skb_propagate_pfmemalloc(struct page *page,
2264 struct sk_buff *skb) 2260 struct sk_buff *skb)
2265{ 2261{
2266 if (page && page->pfmemalloc) 2262 if (page_is_pfmemalloc(page))
2267 skb->pfmemalloc = true; 2263 skb->pfmemalloc = true;
2268} 2264}
2269 2265
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2884 * 2880 *
2885 * PHY drivers may accept clones of transmitted packets for 2881 * PHY drivers may accept clones of transmitted packets for
2886 * timestamping via their phy_driver.txtstamp method. These drivers 2882 * timestamping via their phy_driver.txtstamp method. These drivers
2887 * must call this function to return the skb back to the stack, with 2883 * must call this function to return the skb back to the stack with a
2888 * or without a timestamp. 2884 * timestamp.
2889 * 2885 *
2890 * @skb: clone of the original outgoing packet 2886 * @skb: clone of the original outgoing packet
2891 * @hwtstamps: hardware time stamps, may be NULL if not available 2887 * @hwtstamps: hardware time stamps
2892 * 2888 *
2893 */ 2889 */
2894void skb_complete_tx_timestamp(struct sk_buff *skb, 2890void skb_complete_tx_timestamp(struct sk_buff *skb,
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 3741ba1a652c..edbfc9a5293e 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -67,10 +67,13 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
67static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } 67static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
68#endif /* BROADCAST */ 68#endif /* BROADCAST */
69 69
70#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 70#ifdef CONFIG_GENERIC_CLOCKEVENTS
71extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); 71extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
72#else 72#else
73static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; } 73static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
74{
75 return 0;
76}
74#endif 77#endif
75 78
76static inline void tick_broadcast_enable(void) 79static inline void tick_broadcast_enable(void)
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 3aa72e648650..6e191e4e6ab6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -145,7 +145,6 @@ static inline void getboottime(struct timespec *ts)
145} 145}
146#endif 146#endif
147 147
148#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
149#define ktime_get_real_ts64(ts) getnstimeofday64(ts) 148#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
150 149
151/* 150/*
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index 7c9b484735c5..1f6526c76ee8 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -80,6 +80,9 @@
80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL 80#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC) 81#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
82 82
83/* Driver flags */
84#define CDC_NCM_FLAG_NDP_TO_END 0x02 /* NDP is placed at end of frame */
85
83#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ 86#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
84 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) 87 (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
85#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) 88#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
@@ -103,9 +106,11 @@ struct cdc_ncm_ctx {
103 106
104 spinlock_t mtx; 107 spinlock_t mtx;
105 atomic_t stop; 108 atomic_t stop;
109 int drvflags;
106 110
107 u32 timer_interval; 111 u32 timer_interval;
108 u32 max_ndp_size; 112 u32 max_ndp_size;
113 struct usb_cdc_ncm_ndp16 *delayed_ndp16;
109 114
110 u32 tx_timer_pending; 115 u32 tx_timer_pending;
111 u32 tx_curr_frame_num; 116 u32 tx_curr_frame_num;
@@ -133,7 +138,7 @@ struct cdc_ncm_ctx {
133}; 138};
134 139
135u8 cdc_ncm_select_altsetting(struct usb_interface *intf); 140u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
136int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); 141int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
137void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); 142void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
138struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); 143struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
139int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in); 144int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 45534da57759..644bdc61c387 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,8 +74,6 @@ enum rc_filter_type {
74 * @input_dev: the input child device used to communicate events to userspace 74 * @input_dev: the input child device used to communicate events to userspace
75 * @driver_type: specifies if protocol decoding is done in hardware or software 75 * @driver_type: specifies if protocol decoding is done in hardware or software
76 * @idle: used to keep track of RX state 76 * @idle: used to keep track of RX state
77 * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
78 * wakeup protocols is the set of all raw encoders
79 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols 77 * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
80 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols 78 * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
81 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols 79 * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
136 struct input_dev *input_dev; 134 struct input_dev *input_dev;
137 enum rc_driver_type driver_type; 135 enum rc_driver_type driver_type;
138 bool idle; 136 bool idle;
139 bool encode_wakeup;
140 u64 allowed_protocols; 137 u64 allowed_protocols;
141 u64 enabled_protocols; 138 u64 enabled_protocols;
142 u64 allowed_wakeup_protocols; 139 u64 allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
246#define US_TO_NS(usec) ((usec) * 1000) 243#define US_TO_NS(usec) ((usec) * 1000)
247#define MS_TO_US(msec) ((msec) * 1000) 244#define MS_TO_US(msec) ((msec) * 1000)
248#define MS_TO_NS(msec) ((msec) * 1000 * 1000) 245#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
249#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
250 246
251void ir_raw_event_handle(struct rc_dev *dev); 247void ir_raw_event_handle(struct rc_dev *dev);
252int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); 248int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
254int ir_raw_event_store_with_filter(struct rc_dev *dev, 250int ir_raw_event_store_with_filter(struct rc_dev *dev,
255 struct ir_raw_event *ev); 251 struct ir_raw_event *ev);
256void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); 252void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
257int ir_raw_encode_scancode(u64 protocols,
258 const struct rc_scancode_filter *scancode,
259 struct ir_raw_event *events, unsigned int max);
260 253
261static inline void ir_raw_event_reset(struct rc_dev *dev) 254static inline void ir_raw_event_reset(struct rc_dev *dev)
262{ 255{
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 22a44c2f5963..c192e1b46cdc 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -139,6 +139,7 @@ enum vb2_io_modes {
139 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf 139 * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
140 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver 140 * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver 141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
142 * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
142 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used 143 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
143 * in a hardware operation 144 * in a hardware operation
144 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but 145 * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
152 VB2_BUF_STATE_PREPARING, 153 VB2_BUF_STATE_PREPARING,
153 VB2_BUF_STATE_PREPARED, 154 VB2_BUF_STATE_PREPARED,
154 VB2_BUF_STATE_QUEUED, 155 VB2_BUF_STATE_QUEUED,
156 VB2_BUF_STATE_REQUEUEING,
155 VB2_BUF_STATE_ACTIVE, 157 VB2_BUF_STATE_ACTIVE,
156 VB2_BUF_STATE_DONE, 158 VB2_BUF_STATE_DONE,
157 VB2_BUF_STATE_ERROR, 159 VB2_BUF_STATE_ERROR,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
99 99
100int tcf_hash_search(struct tc_action *a, u32 index); 100int tcf_hash_search(struct tc_action *a, u32 index);
101void tcf_hash_destroy(struct tc_action *a); 101void tcf_hash_destroy(struct tc_action *a);
102int tcf_hash_release(struct tc_action *a, int bind);
103u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); 102u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
104int tcf_hash_check(u32 index, struct tc_action *a, int bind); 103int tcf_hash_check(u32 index, struct tc_action *a, int bind);
105int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, 104int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
107void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 106void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
108void tcf_hash_insert(struct tc_action *a); 107void tcf_hash_insert(struct tc_action *a);
109 108
109int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
110
111static inline int tcf_hash_release(struct tc_action *a, bool bind)
112{
113 return __tcf_hash_release(a, bind, false);
114}
115
110int tcf_register_action(struct tc_action_ops *a, unsigned int mask); 116int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
111int tcf_unregister_action(struct tc_action_ops *a); 117int tcf_unregister_action(struct tc_action_ops *a);
112int tcf_action_destroy(struct list_head *actions, int bind); 118int tcf_action_destroy(struct list_head *actions, int bind);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a741678f24a2..883fe1e7c5a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
4868 struct cfg80211_chan_def *chandef, 4868 struct cfg80211_chan_def *chandef,
4869 enum nl80211_iftype iftype); 4869 enum nl80211_iftype iftype);
4870 4870
4871/**
4872 * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
4873 * @wiphy: the wiphy
4874 * @chandef: the channel definition
4875 * @iftype: interface type
4876 *
4877 * Return: %true if there is no secondary channel or the secondary channel(s)
4878 * can be used for beaconing (i.e. is not a radar channel etc.). This version
4879 * also checks if IR-relaxation conditions apply, to allow beaconing under
4880 * more permissive conditions.
4881 *
4882 * Requires the RTNL to be held.
4883 */
4884bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
4885 struct cfg80211_chan_def *chandef,
4886 enum nl80211_iftype iftype);
4887
4871/* 4888/*
4872 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
4873 * @dev: the device which switched channels 4890 * @dev: the device which switched channels
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e1300b3dd597..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
21 * @INET_FRAG_FIRST_IN: first fragment has arrived 21 * @INET_FRAG_FIRST_IN: first fragment has arrived
22 * @INET_FRAG_LAST_IN: final fragment has arrived 22 * @INET_FRAG_LAST_IN: final fragment has arrived
23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction 23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
24 * @INET_FRAG_EVICTED: frag queue is being evicted
25 */ 24 */
26enum { 25enum {
27 INET_FRAG_FIRST_IN = BIT(0), 26 INET_FRAG_FIRST_IN = BIT(0),
28 INET_FRAG_LAST_IN = BIT(1), 27 INET_FRAG_LAST_IN = BIT(1),
29 INET_FRAG_COMPLETE = BIT(2), 28 INET_FRAG_COMPLETE = BIT(2),
30 INET_FRAG_EVICTED = BIT(3)
31}; 29};
32 30
33/** 31/**
@@ -45,6 +43,7 @@ enum {
45 * @flags: fragment queue flags 43 * @flags: fragment queue flags
46 * @max_size: maximum received fragment size 44 * @max_size: maximum received fragment size
47 * @net: namespace that this frag belongs to 45 * @net: namespace that this frag belongs to
46 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
48 */ 47 */
49struct inet_frag_queue { 48struct inet_frag_queue {
50 spinlock_t lock; 49 spinlock_t lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
59 __u8 flags; 58 __u8 flags;
60 u16 max_size; 59 u16 max_size;
61 struct netns_frags *net; 60 struct netns_frags *net;
61 struct hlist_node list_evictor;
62}; 62};
63 63
64#define INETFRAGS_HASHSZ 1024 64#define INETFRAGS_HASHSZ 1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
125 inet_frag_destroy(q, f); 125 inet_frag_destroy(q, f);
126} 126}
127 127
128static inline bool inet_frag_evicting(struct inet_frag_queue *q)
129{
130 return !hlist_unhashed(&q->list_evictor);
131}
132
128/* Memory Tracking Functions. */ 133/* Memory Tracking Functions. */
129 134
130/* The default percpu_counter batch size is not big enough to scale to 135/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
139 return percpu_counter_read(&nf->mem); 144 return percpu_counter_read(&nf->mem);
140} 145}
141 146
142static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) 147static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
143{ 148{
144 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); 149 __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
145} 150}
146 151
147static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) 152static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
148{ 153{
149 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); 154 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
150} 155}
151 156
152static inline void init_frag_mem_limit(struct netns_frags *nf) 157static inline void init_frag_mem_limit(struct netns_frags *nf)
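inet_frag_evicting() above reports whether a queue sits on the new list_evictor hlist simply by testing hlist_unhashed(), so no extra flag bit is needed. A standalone model of encoding "not on any list" in an intrusive node's back-pointer (same idea as hlist_unhashed(), not the kernel's list code):

/* intrusive node; NULL back-pointer means "not linked anywhere" */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next, **pprev; };

static bool unhashed(const struct node *n) { return n->pprev == NULL; }

static void add_head(struct node *n, struct node **head)
{
    n->next = *head;
    n->pprev = head;
    if (*head)
        (*head)->pprev = &n->next;
    *head = n;
}

int main(void)
{
    struct node q = { 0 };
    struct node *evictor_list = NULL;

    printf("evicting before: %d\n", !unhashed(&q));
    add_head(&q, &evictor_list);
    printf("evicting after:  %d\n", !unhashed(&q));
    return 0;
}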
diff --git a/include/net/ip.h b/include/net/ip.h
index 0750a186ea63..d5fe9f2ab699 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
161} 161}
162 162
163/* datagram.c */ 163/* datagram.c */
164int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
164int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
165 166
166void ip4_datagram_release_cb(struct sock *sk); 167void ip4_datagram_release_cb(struct sock *sk);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 49c142bdf01e..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
183struct fib_table { 183struct fib_table {
184 struct hlist_node tb_hlist; 184 struct hlist_node tb_hlist;
185 u32 tb_id; 185 u32 tb_id;
186 int tb_default;
187 int tb_num_default; 186 int tb_num_default;
188 struct rcu_head rcu; 187 struct rcu_head rcu;
189 unsigned long *tb_data; 188 unsigned long *tb_data;
@@ -290,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
290int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, 289int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
291 u8 tos, int oif, struct net_device *dev, 290 u8 tos, int oif, struct net_device *dev,
292 struct in_device *idev, u32 *itag); 291 struct in_device *idev, u32 *itag);
293void fib_select_default(struct fib_result *res); 292void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
294#ifdef CONFIG_IP_ROUTE_CLASSID 293#ifdef CONFIG_IP_ROUTE_CLASSID
295static inline int fib_num_tclassid_users(struct net *net) 294static inline int fib_num_tclassid_users(struct net *net)
296{ 295{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 291extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 292void init_nf_conntrack_hash_rnd(void);
293 293
294void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); 294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
295 295
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
68 spinlock_t lock; 68 spinlock_t lock;
69 struct hlist_nulls_head unconfirmed; 69 struct hlist_nulls_head unconfirmed;
70 struct hlist_nulls_head dying; 70 struct hlist_nulls_head dying;
71 struct hlist_nulls_head tmpl;
72}; 71};
73 72
74struct netns_ct { 73struct netns_ct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 05a8c1aea251..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
902void sk_set_memalloc(struct sock *sk); 902void sk_set_memalloc(struct sock *sk);
903void sk_clear_memalloc(struct sock *sk); 903void sk_clear_memalloc(struct sock *sk);
904 904
905int sk_wait_data(struct sock *sk, long *timeo); 905int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
906 906
907struct request_sock_ops; 907struct request_sock_ops;
908struct timewait_sock_ops; 908struct timewait_sock_ops;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 986fddb08579..b0f898e3b2e7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1745,6 +1745,7 @@ struct ib_device {
1745 char node_desc[64]; 1745 char node_desc[64];
1746 __be64 node_guid; 1746 __be64 node_guid;
1747 u32 local_dma_lkey; 1747 u32 local_dma_lkey;
1748 u16 is_switch:1;
1748 u8 node_type; 1749 u8 node_type;
1749 u8 phys_port_cnt; 1750 u8 phys_port_cnt;
1750 1751
@@ -1824,6 +1825,20 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1824 u8 port_num); 1825 u8 port_num);
1825 1826
1826/** 1827/**
1828 * rdma_cap_ib_switch - Check if the device is IB switch
1829 * @device: Device to check
1830 *
1831 * Device driver is responsible for setting is_switch bit on
1832 * in ib_device structure at init time.
1833 *
1834 * Return: true if the device is IB switch.
1835 */
1836static inline bool rdma_cap_ib_switch(const struct ib_device *device)
1837{
1838 return device->is_switch;
1839}
1840
1841/**
1827 * rdma_start_port - Return the first valid port number for the device 1842 * rdma_start_port - Return the first valid port number for the device
1828 * specified 1843 * specified
1829 * 1844 *
@@ -1833,7 +1848,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
1833 */ 1848 */
1834static inline u8 rdma_start_port(const struct ib_device *device) 1849static inline u8 rdma_start_port(const struct ib_device *device)
1835{ 1850{
1836 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; 1851 return rdma_cap_ib_switch(device) ? 0 : 1;
1837} 1852}
1838 1853
1839/** 1854/**
@@ -1846,8 +1861,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
1846 */ 1861 */
1847static inline u8 rdma_end_port(const struct ib_device *device) 1862static inline u8 rdma_end_port(const struct ib_device *device)
1848{ 1863{
1849 return (device->node_type == RDMA_NODE_IB_SWITCH) ? 1864 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
1850 0 : device->phys_port_cnt;
1851} 1865}
1852 1866
1853static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num) 1867static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
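With rdma_cap_ib_switch(), a switch exposes only the management port 0 while any other device exposes ports 1..phys_port_cnt, which is what the reworked rdma_start_port()/rdma_end_port() return. A standalone sketch of iterating that port range (illustrative types):

/* port-numbering rule for switches vs. other devices; sketch only */
#include <stdbool.h>
#include <stdio.h>

struct dev_sketch { bool is_switch; unsigned char phys_port_cnt; };

static unsigned char start_port(const struct dev_sketch *d)
{
    return d->is_switch ? 0 : 1;
}

static unsigned char end_port(const struct dev_sketch *d)
{
    return d->is_switch ? 0 : d->phys_port_cnt;
}

int main(void)
{
    struct dev_sketch hca = { .is_switch = false, .phys_port_cnt = 2 };
    struct dev_sketch sw  = { .is_switch = true,  .phys_port_cnt = 36 };
    const struct dev_sketch *devs[] = { &hca, &sw };

    for (unsigned int i = 0; i < 2; i++)
        for (unsigned char p = start_port(devs[i]); p <= end_port(devs[i]); p++)
            printf("dev %u port %u\n", i, p);
    return 0;
}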
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 4942710ef720..8d1d7fa67ec4 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
28 u64 * info_out); 28 u64 * info_out);
29 29
30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq); 30extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
31extern void scsi_set_sense_information(u8 *buf, u64 info);
32 31
33extern int scsi_ioctl_reset(struct scsi_device *, int __user *); 32extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
34 33
diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h
index cdb05dd1d440..d40d3ef25707 100644
--- a/include/scsi/scsi_transport_srp.h
+++ b/include/scsi/scsi_transport_srp.h
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
119extern void srp_rport_del(struct srp_rport *); 119extern void srp_rport_del(struct srp_rport *);
120extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, 120extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
121 int dev_loss_tmo); 121 int dev_loss_tmo);
122int srp_parse_tmo(int *tmo, const char *buf);
122extern int srp_reconnect_rport(struct srp_rport *rport); 123extern int srp_reconnect_rport(struct srp_rport *rport);
123extern void srp_start_tl_fail_timers(struct srp_rport *rport); 124extern void srp_start_tl_fail_timers(struct srp_rport *rport);
124extern void srp_remove_host(struct Scsi_Host *); 125extern void srp_remove_host(struct Scsi_Host *);
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b49bed..74bc85473b58 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@ static inline int snd_ac97_update_power(struct snd_ac97 *ac97, int reg,
584void snd_ac97_suspend(struct snd_ac97 *ac97); 584void snd_ac97_suspend(struct snd_ac97 *ac97);
585void snd_ac97_resume(struct snd_ac97 *ac97); 585void snd_ac97_resume(struct snd_ac97 *ac97);
586#endif 586#endif
587int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
588 unsigned int id_mask);
587 589
588/* quirk types */ 590/* quirk types */
589enum { 591enum {
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c175f6..bb7b2ebfee7b 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@ struct rsnd_src_platform_info {
61/* 61/*
62 * flags 62 * flags
63 */ 63 */
64struct rsnd_ctu_platform_info {
65 u32 flags;
66};
67
68struct rsnd_mix_platform_info {
69 u32 flags;
70};
71
64struct rsnd_dvc_platform_info { 72struct rsnd_dvc_platform_info {
65 u32 flags; 73 u32 flags;
66}; 74};
@@ -68,6 +76,8 @@ struct rsnd_dvc_platform_info {
68struct rsnd_dai_path_info { 76struct rsnd_dai_path_info {
69 struct rsnd_ssi_platform_info *ssi; 77 struct rsnd_ssi_platform_info *ssi;
70 struct rsnd_src_platform_info *src; 78 struct rsnd_src_platform_info *src;
79 struct rsnd_ctu_platform_info *ctu;
80 struct rsnd_mix_platform_info *mix;
71 struct rsnd_dvc_platform_info *dvc; 81 struct rsnd_dvc_platform_info *dvc;
72}; 82};
73 83
@@ -93,6 +103,10 @@ struct rcar_snd_info {
93 int ssi_info_nr; 103 int ssi_info_nr;
94 struct rsnd_src_platform_info *src_info; 104 struct rsnd_src_platform_info *src_info;
95 int src_info_nr; 105 int src_info_nr;
106 struct rsnd_ctu_platform_info *ctu_info;
107 int ctu_info_nr;
108 struct rsnd_mix_platform_info *mix_info;
109 int mix_info_nr;
96 struct rsnd_dvc_platform_info *dvc_info; 110 struct rsnd_dvc_platform_info *dvc_info;
97 int dvc_info_nr; 111 int dvc_info_nr;
98 struct rsnd_dai_platform_info *dai_info; 112 struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a898275..5abba037d245 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
397 const struct snd_soc_dapm_route *route, int num); 397 const struct snd_soc_dapm_route *route, int num);
398int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, 398int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
399 const struct snd_soc_dapm_route *route, int num); 399 const struct snd_soc_dapm_route *route, int num);
400void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
400 401
401/* dapm events */ 402/* dapm events */
402void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, 403void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@ struct snd_soc_dapm_route {
511struct snd_soc_dapm_path { 512struct snd_soc_dapm_path {
512 const char *name; 513 const char *name;
513 514
514 /* source (input) and sink (output) widgets */ 515 /*
515 struct snd_soc_dapm_widget *source; 516 * source (input) and sink (output) widgets
516 struct snd_soc_dapm_widget *sink; 517 * The union is for convenience, since it is a lot nicer to type
518 * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
519 */
520 union {
521 struct {
522 struct snd_soc_dapm_widget *source;
523 struct snd_soc_dapm_widget *sink;
524 };
525 struct snd_soc_dapm_widget *node[2];
526 };
517 527
518 /* status */ 528 /* status */
519 u32 connect:1; /* source and sink widgets are connected */ 529 u32 connect:1; /* source and sink widgets are connected */
@@ -524,8 +534,7 @@ struct snd_soc_dapm_path {
524 int (*connected)(struct snd_soc_dapm_widget *source, 534 int (*connected)(struct snd_soc_dapm_widget *source,
525 struct snd_soc_dapm_widget *sink); 535 struct snd_soc_dapm_widget *sink);
526 536
527 struct list_head list_source; 537 struct list_head list_node[2];
528 struct list_head list_sink;
529 struct list_head list_kcontrol; 538 struct list_head list_kcontrol;
530 struct list_head list; 539 struct list_head list;
531}; 540};
@@ -559,8 +568,7 @@ struct snd_soc_dapm_widget {
559 unsigned char new_power:1; /* power from this run */ 568 unsigned char new_power:1; /* power from this run */
560 unsigned char power_checked:1; /* power checked this run */ 569 unsigned char power_checked:1; /* power checked this run */
561 unsigned char is_supply:1; /* Widget is a supply type widget */ 570 unsigned char is_supply:1; /* Widget is a supply type widget */
562 unsigned char is_sink:1; /* Widget is a sink type widget */ 571 unsigned char is_ep:2; /* Widget is a endpoint type widget */
563 unsigned char is_source:1; /* Widget is a source type widget */
564 int subseq; /* sort within widget type */ 572 int subseq; /* sort within widget type */
565 573
566 int (*power_check)(struct snd_soc_dapm_widget *w); 574 int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@ struct snd_soc_dapm_widget {
575 struct snd_kcontrol **kcontrols; 583 struct snd_kcontrol **kcontrols;
576 struct snd_soc_dobj dobj; 584 struct snd_soc_dobj dobj;
577 585
578 /* widget input and outputs */ 586 /* widget input and output edges */
579 struct list_head sources; 587 struct list_head edges[2];
580 struct list_head sinks;
581 588
582 /* used during DAPM updates */ 589 /* used during DAPM updates */
583 struct list_head work_list; 590 struct list_head work_list;
584 struct list_head power_list; 591 struct list_head power_list;
585 struct list_head dirty; 592 struct list_head dirty;
586 int inputs; 593 int endpoints[2];
587 int outputs;
588 594
589 struct clk *clk; 595 struct clk *clk;
590}; 596};
@@ -672,4 +678,58 @@ static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
672 return dapm->bias_level; 678 return dapm->bias_level;
673} 679}
674 680
681enum snd_soc_dapm_direction {
682 SND_SOC_DAPM_DIR_IN,
683 SND_SOC_DAPM_DIR_OUT
684};
685
686#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
687
688#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
689#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
690
691/**
692 * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
693 * specified direction of a widget
694 * @w: The widget
695 * @dir: Whether to iterate over the paths where the specified widget is the
696 * incoming or outgoing widgets
697 * @p: The path iterator variable
698 */
699#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
700 list_for_each_entry(p, &w->edges[dir], list_node[dir])
701
702/**
703 * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
704 * specified direction of a widget
705 * @w: The widget
706 * @dir: Whether to iterate over the paths where the specified widget is the
707 * incoming or outgoing widgets
708 * @p: The path iterator variable
709 * @next_p: Temporary storage for the next path
710 *
711 * This function works like snd_soc_dapm_widget_for_each_path, except that
712 * it is safe to remove the current path from the list while iterating
713 */
714#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
715 list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
716
717/**
718 * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
719 * widget
720 * @w: The widget
721 * @p: The path iterator variable
722 */
723#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
724 snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
725
726/**
727 * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
728 * a widget
729 * @w: The widget
730 * @p: The path iterator variable
731 */
732#define snd_soc_dapm_widget_for_each_source_path(w, p) \
733 snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
734
675#endif 735#endif
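The reworked snd_soc_dapm_path stores its two endpoints in an anonymous union, so p->source and p->node[SND_SOC_DAPM_DIR_IN] name the same storage and generic code can index by direction. A standalone C11 sketch of that layout (illustrative types, not the DAPM structures):

/* anonymous struct-in-union: named and indexed views of the same pointers */
#include <assert.h>
#include <stdio.h>

enum dir { DIR_IN, DIR_OUT };

struct widget { const char *name; };

struct path {
    union {
        struct {
            struct widget *source;
            struct widget *sink;
        };
        struct widget *node[2];
    };
};

int main(void)
{
    struct widget in = { "mic" }, out = { "mixer" };
    struct path p = { .source = &in, .sink = &out };

    assert(p.node[DIR_IN] == p.source && p.node[DIR_OUT] == p.sink);
    printf("%s -> %s\n", p.node[DIR_IN]->name, p.node[DIR_OUT]->name);
    return 0;
}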
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 865a141b118b..427bc41df3ae 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
141 int io_ops_count; 141 int io_ops_count;
142}; 142};
143 143
144#ifdef CONFIG_SND_SOC_TOPOLOGY
145
144/* gets a pointer to data from the firmware block header */ 146/* gets a pointer to data from the firmware block header */
145static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr) 147static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
146{ 148{
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
165 const struct snd_soc_tplg_widget_events *events, int num_events, 167 const struct snd_soc_tplg_widget_events *events, int num_events,
166 u16 event_type); 168 u16 event_type);
167 169
170#else
171
172static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
173 u32 index)
174{
175 return 0;
176}
177
178#endif
179
168#endif 180#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf9d54a..884e728b09d9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -526,7 +526,8 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
526 526
527#ifdef CONFIG_SND_SOC_AC97_BUS 527#ifdef CONFIG_SND_SOC_AC97_BUS
528struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec); 528struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
529struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec); 529struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
530 unsigned int id, unsigned int id_mask);
530void snd_soc_free_ac97_codec(struct snd_ac97 *ac97); 531void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
531 532
532int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops); 533int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
@@ -619,6 +620,7 @@ int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
619 * @pin: name of the pin to update 620 * @pin: name of the pin to update
620 * @mask: bits to check for in reported jack status 621 * @mask: bits to check for in reported jack status
621 * @invert: if non-zero then pin is enabled when status is not reported 622 * @invert: if non-zero then pin is enabled when status is not reported
623 * @list: internal list entry
622 */ 624 */
623struct snd_soc_jack_pin { 625struct snd_soc_jack_pin {
624 struct list_head list; 626 struct list_head list;
@@ -635,7 +637,7 @@ struct snd_soc_jack_pin {
635 * @jack_type: type of jack that is expected for this voltage 637 * @jack_type: type of jack that is expected for this voltage
636 * @debounce_time: debounce_time for jack, codec driver should wait for this 638 * @debounce_time: debounce_time for jack, codec driver should wait for this
637 * duration before reading the adc for voltages 639 * duration before reading the adc for voltages
638 * @:list: list container 640 * @list: internal list entry
639 */ 641 */
640struct snd_soc_jack_zone { 642struct snd_soc_jack_zone {
641 unsigned int min_mv; 643 unsigned int min_mv;
@@ -651,12 +653,12 @@ struct snd_soc_jack_zone {
651 * @gpio: legacy gpio number 653 * @gpio: legacy gpio number
652 * @idx: gpio descriptor index within the function of the GPIO 654 * @idx: gpio descriptor index within the function of the GPIO
653 * consumer device 655 * consumer device
654 * @gpiod_dev GPIO consumer device 656 * @gpiod_dev: GPIO consumer device
655 * @name: gpio name. Also as connection ID for the GPIO consumer 657 * @name: gpio name. Also as connection ID for the GPIO consumer
656 * device function name lookup 658 * device function name lookup
657 * @report: value to report when jack detected 659 * @report: value to report when jack detected
658 * @invert: report presence in low state 660 * @invert: report presence in low state
659 * @debouce_time: debouce time in ms 661 * @debounce_time: debounce time in ms
660 * @wake: enable as wake source 662 * @wake: enable as wake source
661 * @jack_status_check: callback function which overrides the detection 663 * @jack_status_check: callback function which overrides the detection
662 * to provide more complex checks (eg, reading an 664 * to provide more complex checks (eg, reading an
@@ -672,11 +674,13 @@ struct snd_soc_jack_gpio {
672 int debounce_time; 674 int debounce_time;
673 bool wake; 675 bool wake;
674 676
677 /* private: */
675 struct snd_soc_jack *jack; 678 struct snd_soc_jack *jack;
676 struct delayed_work work; 679 struct delayed_work work;
677 struct gpio_desc *desc; 680 struct gpio_desc *desc;
678 681
679 void *data; 682 void *data;
683 /* public: */
680 int (*jack_status_check)(void *data); 684 int (*jack_status_check)(void *data);
681}; 685};
682 686
@@ -758,7 +762,6 @@ struct snd_soc_component {
758 762
759 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */ 763 unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
760 unsigned int registered_as_component:1; 764 unsigned int registered_as_component:1;
761 unsigned int probed:1;
762 765
763 struct list_head list; 766 struct list_head list;
764 767
@@ -792,7 +795,6 @@ struct snd_soc_component {
792 795
793 /* Don't use these, use snd_soc_component_get_dapm() */ 796 /* Don't use these, use snd_soc_component_get_dapm() */
794 struct snd_soc_dapm_context dapm; 797 struct snd_soc_dapm_context dapm;
795 struct snd_soc_dapm_context *dapm_ptr;
796 798
797 const struct snd_kcontrol_new *controls; 799 const struct snd_kcontrol_new *controls;
798 unsigned int num_controls; 800 unsigned int num_controls;
@@ -832,9 +834,6 @@ struct snd_soc_codec {
832 /* component */ 834 /* component */
833 struct snd_soc_component component; 835 struct snd_soc_component component;
834 836
835 /* Don't access this directly, use snd_soc_codec_get_dapm() */
836 struct snd_soc_dapm_context dapm;
837
838#ifdef CONFIG_DEBUG_FS 837#ifdef CONFIG_DEBUG_FS
839 struct dentry *debugfs_reg; 838 struct dentry *debugfs_reg;
840#endif 839#endif
@@ -1277,7 +1276,7 @@ static inline struct snd_soc_component *snd_soc_dapm_to_component(
1277static inline struct snd_soc_codec *snd_soc_dapm_to_codec( 1276static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
1278 struct snd_soc_dapm_context *dapm) 1277 struct snd_soc_dapm_context *dapm)
1279{ 1278{
1280 return container_of(dapm, struct snd_soc_codec, dapm); 1279 return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
1281} 1280}
1282 1281
1283/** 1282/**
@@ -1302,7 +1301,7 @@ static inline struct snd_soc_platform *snd_soc_dapm_to_platform(
1302static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm( 1301static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
1303 struct snd_soc_component *component) 1302 struct snd_soc_component *component)
1304{ 1303{
1305 return component->dapm_ptr; 1304 return &component->dapm;
1306} 1305}
1307 1306
1308/** 1307/**
@@ -1314,12 +1313,12 @@ static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
1314static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm( 1313static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
1315 struct snd_soc_codec *codec) 1314 struct snd_soc_codec *codec)
1316{ 1315{
1317 return &codec->dapm; 1316 return snd_soc_component_get_dapm(&codec->component);
1318} 1317}
1319 1318
1320/** 1319/**
1321 * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level 1320 * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
1322 * @dapm: The CODEC for which to initialize the DAPM bias level 1321 * @codec: The CODEC for which to initialize the DAPM bias level
1323 * @level: The DAPM level to initialize to 1322 * @level: The DAPM level to initialize to
1324 * 1323 *
1325 * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level(). 1324 * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1603,10 @@ int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
1604int snd_soc_of_parse_tdm_slot(struct device_node *np, 1603int snd_soc_of_parse_tdm_slot(struct device_node *np,
1605 unsigned int *slots, 1604 unsigned int *slots,
1606 unsigned int *slot_width); 1605 unsigned int *slot_width);
1606void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
1607 struct snd_soc_codec_conf *codec_conf,
1608 struct device_node *of_node,
1609 const char *propname);
1607int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, 1610int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
1608 const char *propname); 1611 const char *propname);
1609unsigned int snd_soc_of_parse_daifmt(struct device_node *np, 1612unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
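
A hedged sketch of calling the new snd_soc_of_parse_audio_prefix() helper from a machine driver (the function name, property names and surrounding code are illustrative assumptions, not part of the diff):

	static int card_parse_of(struct snd_soc_card *card,
				 struct snd_soc_codec_conf *codec_conf,
				 struct device_node *np)
	{
		/* e.g. 'audio-prefix = "Headset";' - stored in codec_conf so the
		 * codec's controls and widgets are renamed with that prefix. */
		snd_soc_of_parse_audio_prefix(card, codec_conf, np, "audio-prefix");

		return snd_soc_of_parse_audio_routing(card, "audio-routing");
	}
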
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 34117b8b72e4..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -595,6 +595,7 @@ struct iscsi_conn {
595 int bitmap_id; 595 int bitmap_id;
596 int rx_thread_active; 596 int rx_thread_active;
597 struct task_struct *rx_thread; 597 struct task_struct *rx_thread;
598 struct completion rx_login_comp;
598 int tx_thread_active; 599 int tx_thread_active;
599 struct task_struct *tx_thread; 600 struct task_struct *tx_thread;
600 /* list_head for session connection list */ 601 /* list_head for session connection list */
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d96d0f..317a1ed2f4ac 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9 9
10#define DAPM_DIRECT "(direct)" 10#define DAPM_DIRECT "(direct)"
11#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
11 12
12struct snd_soc_jack; 13struct snd_soc_jack;
13struct snd_soc_codec; 14struct snd_soc_codec;
@@ -152,62 +153,38 @@ TRACE_EVENT(snd_soc_dapm_walk_done,
152 (int)__entry->path_checks, (int)__entry->neighbour_checks) 153 (int)__entry->path_checks, (int)__entry->neighbour_checks)
153); 154);
154 155
155TRACE_EVENT(snd_soc_dapm_output_path, 156TRACE_EVENT(snd_soc_dapm_path,
156 157
157 TP_PROTO(struct snd_soc_dapm_widget *widget, 158 TP_PROTO(struct snd_soc_dapm_widget *widget,
159 enum snd_soc_dapm_direction dir,
158 struct snd_soc_dapm_path *path), 160 struct snd_soc_dapm_path *path),
159 161
160 TP_ARGS(widget, path), 162 TP_ARGS(widget, dir, path),
161 163
162 TP_STRUCT__entry( 164 TP_STRUCT__entry(
163 __string( wname, widget->name ) 165 __string( wname, widget->name )
164 __string( pname, path->name ? path->name : DAPM_DIRECT) 166 __string( pname, path->name ? path->name : DAPM_DIRECT)
165 __string( psname, path->sink->name ) 167 __string( pnname, path->node[dir]->name )
166 __field( int, path_sink ) 168 __field( int, path_node )
167 __field( int, path_connect ) 169 __field( int, path_connect )
170 __field( int, path_dir )
168 ), 171 ),
169 172
170 TP_fast_assign( 173 TP_fast_assign(
171 __assign_str(wname, widget->name); 174 __assign_str(wname, widget->name);
172 __assign_str(pname, path->name ? path->name : DAPM_DIRECT); 175 __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
173 __assign_str(psname, path->sink->name); 176 __assign_str(pnname, path->node[dir]->name);
174 __entry->path_connect = path->connect; 177 __entry->path_connect = path->connect;
175 __entry->path_sink = (long)path->sink; 178 __entry->path_node = (long)path->node[dir];
179 __entry->path_dir = dir;
176 ), 180 ),
177 181
178 TP_printk("%c%s -> %s -> %s", 182 TP_printk("%c%s %s %s %s %s",
179 (int) __entry->path_sink && 183 (int) __entry->path_node &&
180 (int) __entry->path_connect ? '*' : ' ', 184 (int) __entry->path_connect ? '*' : ' ',
181 __get_str(wname), __get_str(pname), __get_str(psname)) 185 __get_str(wname), DAPM_ARROW(__entry->path_dir),
182); 186 __get_str(pname), DAPM_ARROW(__entry->path_dir),
183 187 __get_str(pnname))
184TRACE_EVENT(snd_soc_dapm_input_path,
185
186 TP_PROTO(struct snd_soc_dapm_widget *widget,
187 struct snd_soc_dapm_path *path),
188
189 TP_ARGS(widget, path),
190
191 TP_STRUCT__entry(
192 __string( wname, widget->name )
193 __string( pname, path->name ? path->name : DAPM_DIRECT)
194 __string( psname, path->source->name )
195 __field( int, path_source )
196 __field( int, path_connect )
197 ),
198
199 TP_fast_assign(
200 __assign_str(wname, widget->name);
201 __assign_str(pname, path->name ? path->name : DAPM_DIRECT);
202 __assign_str(psname, path->source->name);
203 __entry->path_connect = path->connect;
204 __entry->path_source = (long)path->source;
205 ),
206
207 TP_printk("%c%s <- %s <- %s",
208 (int) __entry->path_source &&
209 (int) __entry->path_connect ? '*' : ' ',
210 __get_str(wname), __get_str(pname), __get_str(psname))
211); 188);
212 189
213TRACE_EVENT(snd_soc_dapm_connected, 190TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6fce900a833..fbdd11851725 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -32,7 +32,7 @@
32#ifndef __AMDGPU_DRM_H__ 32#ifndef __AMDGPU_DRM_H__
33#define __AMDGPU_DRM_H__ 33#define __AMDGPU_DRM_H__
34 34
35#include <drm/drm.h> 35#include "drm.h"
36 36
37#define DRM_AMDGPU_GEM_CREATE 0x00 37#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 38#define DRM_AMDGPU_GEM_MMAP 0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
614 uint32_t vram_type; 614 uint32_t vram_type;
615 /** video memory bit width*/ 615 /** video memory bit width*/
616 uint32_t vram_bit_width; 616 uint32_t vram_bit_width;
617 /* vce harvesting instance */
618 uint32_t vce_harvest_config;
617}; 619};
618 620
619struct drm_amdgpu_info_hw_ip { 621struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 6e1a2ed116cb..db809b722985 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
1070 __u64 offset; 1070 __u64 offset;
1071 __u64 val; /* Return value */ 1071 __u64 val; /* Return value */
1072}; 1072};
1073/* Known registers:
1074 *
1075 * Render engine timestamp - 0x2358 + 64bit - gen7+
1076 * - Note this register returns an invalid value if using the default
1077 * single instruction 8byte read, in order to workaround that use
1078 * offset (0x2358 | 1) instead.
1079 *
1080 */
1073 1081
1074struct drm_i915_reset_stats { 1082struct drm_i915_reset_stats {
1075 __u32 ctx_id; 1083 __u32 ctx_id;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1ef76661e1a1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index 669a1f0b1d97..23cbd34e4ac7 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -15,6 +15,7 @@ enum {
15 NETCONFA_RP_FILTER, 15 NETCONFA_RP_FILTER,
16 NETCONFA_MC_FORWARDING, 16 NETCONFA_MC_FORWARDING,
17 NETCONFA_PROXY_NEIGH, 17 NETCONFA_PROXY_NEIGH,
18 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
18 __NETCONFA_MAX 19 __NETCONFA_MAX
19}; 20};
20#define NETCONFA_MAX (__NETCONFA_MAX - 1) 21#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ 319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ 320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ 321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
322#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ 323#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
323 324
324/* MSI-X Table entry format */ 325/* MSI-X Table entry format */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
34/* The feature bitmap for virtio net */ 34/* The feature bitmap for virtio net */
35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ 35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ 36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
37#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
37#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ 38#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
38#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ 39#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
39#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ 40#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
226 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 228 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
228 229
230/*
231 * Control network offloads
232 *
233 * Reconfigures the network offloads that Guest can handle.
234 *
235 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
236 *
237 * Command data format matches the feature bit mask exactly.
238 *
239 * See VIRTIO_NET_F_GUEST_* for the list of offloads
240 * that can be enabled/disabled.
241 */
242#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
243#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
244
229#endif /* _LINUX_VIRTIO_NET_H */ 245#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
157 __le32 queue_used_hi; /* read-write */ 157 __le32 queue_used_hi; /* read-write */
158}; 158};
159 159
160/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
161struct virtio_pci_cfg_cap {
162 struct virtio_pci_cap cap;
163 __u8 pci_cfg_data[4]; /* Data for BAR access. */
164};
165
160/* Macro versions of offsets for the Old Timers! */ 166/* Macro versions of offsets for the Old Timers! */
161#define VIRTIO_PCI_CAP_VNDR 0 167#define VIRTIO_PCI_CAP_VNDR 0
162#define VIRTIO_PCI_CAP_NEXT 1 168#define VIRTIO_PCI_CAP_NEXT 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
31 * SUCH DAMAGE. 31 * SUCH DAMAGE.
32 * 32 *
33 * Copyright Rusty Russell IBM Corporation 2007. */ 33 * Copyright Rusty Russell IBM Corporation 2007. */
34#ifndef __KERNEL__
35#include <stdint.h>
36#endif
34#include <linux/types.h> 37#include <linux/types.h>
35#include <linux/virtio_types.h> 38#include <linux/virtio_types.h>
36 39
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
143 vr->num = num; 146 vr->num = num;
144 vr->desc = p; 147 vr->desc = p;
145 vr->avail = p + num*sizeof(struct vring_desc); 148 vr->avail = p + num*sizeof(struct vring_desc);
146 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) 149 vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
147 + align-1) & ~(align - 1)); 150 + align-1) & ~(align - 1));
148} 151}
149 152
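
The new <stdint.h> guard above lets this header be consumed by plain userspace code; a small sketch of such a caller (the allocation and error handling are illustrative only):

	#include <stdlib.h>
	#include <string.h>
	#include <linux/virtio_ring.h>

	static int setup_ring(struct vring *vr, unsigned int num, unsigned int align)
	{
		void *buf;

		/* One contiguous, suitably aligned buffer for desc/avail/used. */
		if (posix_memalign(&buf, align, vring_size(num, align)))
			return -1;
		memset(buf, 0, vring_size(num, align));
		vring_init(vr, num, buf, align);
		return 0;
	}
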
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 12215205ab8d..247c50bd60f0 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,6 +18,12 @@
18#include <linux/types.h> 18#include <linux/types.h>
19#include <sound/asound.h> 19#include <sound/asound.h>
20 20
21#ifndef __KERNEL__
22#error This API is an early revision and not enabled in the current
23#error kernel release, it will be enabled in a future kernel version
24#error with incompatible changes to what is here.
25#endif
26
21/* 27/*
22 * Maximum number of channels topology kcontrol can represent. 28 * Maximum number of channels topology kcontrol can represent.
23 */ 29 */
@@ -77,7 +83,7 @@
77#define SND_SOC_TPLG_NUM_TEXTS 16 83#define SND_SOC_TPLG_NUM_TEXTS 16
78 84
79/* ABI version */ 85/* ABI version */
80#define SND_SOC_TPLG_ABI_VERSION 0x2 86#define SND_SOC_TPLG_ABI_VERSION 0x3
81 87
82/* Max size of TLV data */ 88/* Max size of TLV data */
83#define SND_SOC_TPLG_TLV_SIZE 32 89#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +103,8 @@
97#define SND_SOC_TPLG_TYPE_PCM 7 103#define SND_SOC_TPLG_TYPE_PCM 7
98#define SND_SOC_TPLG_TYPE_MANIFEST 8 104#define SND_SOC_TPLG_TYPE_MANIFEST 8
99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9 105#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
100#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK 106#define SND_SOC_TPLG_TYPE_PDATA 10
107#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
101 108
102/* vendor block IDs - please add new vendor types to end */ 109/* vendor block IDs - please add new vendor types to end */
103#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 110#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -110,7 +117,7 @@
110 117
111/* 118/*
112 * Block Header. 119 * Block Header.
113 * This header preceeds all object and object arrays below. 120 * This header precedes all object and object arrays below.
114 */ 121 */
115struct snd_soc_tplg_hdr { 122struct snd_soc_tplg_hdr {
116 __le32 magic; /* magic number */ 123 __le32 magic; /* magic number */
@@ -137,11 +144,19 @@ struct snd_soc_tplg_private {
137/* 144/*
138 * Kcontrol TLV data. 145 * Kcontrol TLV data.
139 */ 146 */
147struct snd_soc_tplg_tlv_dbscale {
148 __le32 min;
149 __le32 step;
150 __le32 mute;
151} __attribute__((packed));
152
140struct snd_soc_tplg_ctl_tlv { 153struct snd_soc_tplg_ctl_tlv {
141 __le32 size; /* in bytes aligned to 4 */ 154 __le32 size; /* in bytes of this structure */
142 __le32 numid; /* control element numeric identification */ 155 __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
143 __le32 count; /* number of elem in data array */ 156 union {
144 __le32 data[SND_SOC_TPLG_TLV_SIZE]; 157 __le32 data[SND_SOC_TPLG_TLV_SIZE];
158 struct snd_soc_tplg_tlv_dbscale scale;
159 };
145} __attribute__((packed)); 160} __attribute__((packed));
146 161
147/* 162/*
@@ -155,9 +170,11 @@ struct snd_soc_tplg_channel {
155} __attribute__((packed)); 170} __attribute__((packed));
156 171
157/* 172/*
158 * Kcontrol Operations IDs 173 * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
174 * Kcontrol ops need get/put/info.
175 * Bytes ext ops need get/put.
159 */ 176 */
160struct snd_soc_tplg_kcontrol_ops_id { 177struct snd_soc_tplg_io_ops {
161 __le32 get; 178 __le32 get;
162 __le32 put; 179 __le32 put;
163 __le32 info; 180 __le32 info;
@@ -171,8 +188,8 @@ struct snd_soc_tplg_ctl_hdr {
171 __le32 type; 188 __le32 type;
172 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 189 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
173 __le32 access; 190 __le32 access;
174 struct snd_soc_tplg_kcontrol_ops_id ops; 191 struct snd_soc_tplg_io_ops ops;
175 __le32 tlv_size; /* non zero means control has TLV data */ 192 struct snd_soc_tplg_ctl_tlv tlv;
176} __attribute__((packed)); 193} __attribute__((packed));
177 194
178/* 195/*
@@ -222,7 +239,7 @@ struct snd_soc_tplg_stream_config {
222/* 239/*
223 * Manifest. List totals for each payload type. Not used in parsing, but will 240 * Manifest. List totals for each payload type. Not used in parsing, but will
224 * be passed to the component driver before any other objects in order for any 241 * be passed to the component driver before any other objects in order for any
225 * global componnent resource allocations. 242 * global component resource allocations.
226 * 243 *
227 * File block representation for manifest :- 244 * File block representation for manifest :-
228 * +-----------------------------------+----+ 245 * +-----------------------------------+----+
@@ -238,6 +255,7 @@ struct snd_soc_tplg_manifest {
238 __le32 graph_elems; /* number of graph elements */ 255 __le32 graph_elems; /* number of graph elements */
239 __le32 dai_elems; /* number of DAI elements */ 256 __le32 dai_elems; /* number of DAI elements */
240 __le32 dai_link_elems; /* number of DAI link elements */ 257 __le32 dai_link_elems; /* number of DAI link elements */
258 struct snd_soc_tplg_private priv;
241} __attribute__((packed)); 259} __attribute__((packed));
242 260
243/* 261/*
@@ -259,7 +277,6 @@ struct snd_soc_tplg_mixer_control {
259 __le32 invert; 277 __le32 invert;
260 __le32 num_channels; 278 __le32 num_channels;
261 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; 279 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
262 struct snd_soc_tplg_ctl_tlv tlv;
263 struct snd_soc_tplg_private priv; 280 struct snd_soc_tplg_private priv;
264} __attribute__((packed)); 281} __attribute__((packed));
265 282
@@ -303,6 +320,7 @@ struct snd_soc_tplg_bytes_control {
303 __le32 mask; 320 __le32 mask;
304 __le32 base; 321 __le32 base;
305 __le32 num_regs; 322 __le32 num_regs;
323 struct snd_soc_tplg_io_ops ext_ops;
306 struct snd_soc_tplg_private priv; 324 struct snd_soc_tplg_private priv;
307} __attribute__((packed)); 325} __attribute__((packed));
308 326
@@ -347,6 +365,7 @@ struct snd_soc_tplg_dapm_widget {
347 __le32 reg; /* negative reg = no direct dapm */ 365 __le32 reg; /* negative reg = no direct dapm */
348 __le32 shift; /* bits to shift */ 366 __le32 shift; /* bits to shift */
349 __le32 mask; /* non-shifted mask */ 367 __le32 mask; /* non-shifted mask */
368 __le32 subseq; /* sort within widget type */
350 __u32 invert; /* invert the power bit */ 369 __u32 invert; /* invert the power bit */
351 __u32 ignore_suspend; /* kept enabled over suspend */ 370 __u32 ignore_suspend; /* kept enabled over suspend */
352 __u16 event_flags; 371 __u16 event_flags;
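
A sketch of filling the new union-based TLV as a dB scale (the values and the use of kernel byte-order helpers are illustrative; SNDRV_CTL_TLVT_DB_SCALE comes from the existing ALSA TLV headers):

	struct snd_soc_tplg_ctl_tlv tlv = {
		.size = cpu_to_le32(sizeof(struct snd_soc_tplg_ctl_tlv)),
		.type = cpu_to_le32(SNDRV_CTL_TLVT_DB_SCALE),
		.scale = {
			.min  = cpu_to_le32(-6000),	/* -60 dB in 0.01 dB units */
			.step = cpu_to_le32(100),	/* 1 dB per control step */
			.mute = cpu_to_le32(1),		/* minimum level mutes */
		},
	};
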
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
656 key_init(); 656 key_init();
657 security_init(); 657 security_init();
658 dbg_late_init(); 658 dbg_late_init();
659 vfs_caches_init(totalram_pages); 659 vfs_caches_init();
660 signals_init(); 660 signals_init();
661 /* rootfs populating might need page-writeback */ 661 /* rootfs populating might need page-writeback */
662 page_writeback_init(); 662 page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
142 if (!leaf) 142 if (!leaf)
143 return -ENOMEM; 143 return -ENOMEM;
144 INIT_LIST_HEAD(&leaf->msg_list); 144 INIT_LIST_HEAD(&leaf->msg_list);
145 info->qsize += sizeof(*leaf);
146 } 145 }
147 leaf->priority = msg->m_type; 146 leaf->priority = msg->m_type;
148 rb_link_node(&leaf->rb_node, parent, p); 147 rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
187 "lazy leaf delete!\n"); 186 "lazy leaf delete!\n");
188 rb_erase(&leaf->rb_node, &info->msg_tree); 187 rb_erase(&leaf->rb_node, &info->msg_tree);
189 if (info->node_cache) { 188 if (info->node_cache) {
190 info->qsize -= sizeof(*leaf);
191 kfree(leaf); 189 kfree(leaf);
192 } else { 190 } else {
193 info->node_cache = leaf; 191 info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
200 if (list_empty(&leaf->msg_list)) { 198 if (list_empty(&leaf->msg_list)) {
201 rb_erase(&leaf->rb_node, &info->msg_tree); 199 rb_erase(&leaf->rb_node, &info->msg_tree);
202 if (info->node_cache) { 200 if (info->node_cache) {
203 info->qsize -= sizeof(*leaf);
204 kfree(leaf); 201 kfree(leaf);
205 } else { 202 } else {
206 info->node_cache = leaf; 203 info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1034 /* Save our speculative allocation into the cache */ 1031 /* Save our speculative allocation into the cache */
1035 INIT_LIST_HEAD(&new_leaf->msg_list); 1032 INIT_LIST_HEAD(&new_leaf->msg_list);
1036 info->node_cache = new_leaf; 1033 info->node_cache = new_leaf;
1037 info->qsize += sizeof(*new_leaf);
1038 new_leaf = NULL; 1034 new_leaf = NULL;
1039 } else { 1035 } else {
1040 kfree(new_leaf); 1036 kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1142 /* Save our speculative allocation into the cache */ 1138 /* Save our speculative allocation into the cache */
1143 INIT_LIST_HEAD(&new_leaf->msg_list); 1139 INIT_LIST_HEAD(&new_leaf->msg_list);
1144 info->node_cache = new_leaf; 1140 info->node_cache = new_leaf;
1145 info->qsize += sizeof(*new_leaf);
1146 } else { 1141 } else {
1147 kfree(new_leaf); 1142 kfree(new_leaf);
1148 } 1143 }
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
253} 253}
254 254
255/* 255/*
256 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
257 * are only control barriers.
258 * The code must pair with spin_unlock(&sem->lock) or
259 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
260 *
261 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
262 */
263#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
264
265/*
256 * Wait until all currently ongoing simple ops have completed. 266 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock. 267 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check 268 * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
275 sem = sma->sem_base + i; 285 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock); 286 spin_unlock_wait(&sem->lock);
277 } 287 }
288 ipc_smp_acquire__after_spin_is_unlocked();
278} 289}
279 290
280/* 291/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
327 /* Then check that the global lock is free */ 338 /* Then check that the global lock is free */
328 if (!spin_is_locked(&sma->sem_perm.lock)) { 339 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* 340 /*
330 * The ipc object lock check must be visible on all 341 * We need a memory barrier with acquire semantics,
331 * cores before rechecking the complex count. Otherwise 342 * otherwise we can race with another thread that does:
332 * we can race with another thread that does:
333 * complex_count++; 343 * complex_count++;
334 * spin_unlock(sem_perm.lock); 344 * spin_unlock(sem_perm.lock);
335 */ 345 */
336 smp_rmb(); 346 ipc_smp_acquire__after_spin_is_unlocked();
337 347
338 /* 348 /*
339 * Now repeat the test of complex_count: 349 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
2074 rcu_read_lock(); 2084 rcu_read_lock();
2075 un = list_entry_rcu(ulp->list_proc.next, 2085 un = list_entry_rcu(ulp->list_proc.next,
2076 struct sem_undo, list_proc); 2086 struct sem_undo, list_proc);
2077 if (&un->list_proc == &ulp->list_proc) 2087 if (&un->list_proc == &ulp->list_proc) {
2078 semid = -1; 2088 /*
2079 else 2089 * We must wait for freeary() before freeing this ulp,
2080 semid = un->semid; 2090 * in case we raced with last sem_undo. There is a small
2091 * possibility where we exit while freeary() didn't
2092 * finish unlocking sem_undo_list.
2093 */
2094 spin_unlock_wait(&ulp->lock);
2095 rcu_read_unlock();
2096 break;
2097 }
2098 spin_lock(&ulp->lock);
2099 semid = un->semid;
2100 spin_unlock(&ulp->lock);
2081 2101
2102 /* exit_sem raced with IPC_RMID, nothing to do */
2082 if (semid == -1) { 2103 if (semid == -1) {
2083 rcu_read_unlock(); 2104 rcu_read_unlock();
2084 break; 2105 continue;
2085 } 2106 }
2086 2107
2087 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); 2108 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2088 /* exit_sem raced with IPC_RMID, nothing to do */ 2109 /* exit_sem raced with IPC_RMID, nothing to do */
2089 if (IS_ERR(sma)) { 2110 if (IS_ERR(sma)) {
2090 rcu_read_unlock(); 2111 rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
2112 ipc_assert_locked_object(&sma->sem_perm); 2133 ipc_assert_locked_object(&sma->sem_perm);
2113 list_del(&un->list_id); 2134 list_del(&un->list_id);
2114 2135
2115 spin_lock(&ulp->lock); 2136 /* we are the last process using this ulp, acquiring ulp->lock
2137 * isn't required. Besides that, we are also protected against
2138 * IPC_RMID as we hold sma->sem_perm lock now
2139 */
2116 list_del_rcu(&un->list_proc); 2140 list_del_rcu(&un->list_proc);
2117 spin_unlock(&ulp->lock);
2118 2141
2119 /* perform adjustments registered in un */ 2142 /* perform adjustments registered in un */
2120 for (i = 0; i < sma->sem_nsems; i++) { 2143 for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
545 if ((shmflg & SHM_NORESERVE) && 545 if ((shmflg & SHM_NORESERVE) &&
546 sysctl_overcommit_memory != OVERCOMMIT_NEVER) 546 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
547 acctflag = VM_NORESERVE; 547 acctflag = VM_NORESERVE;
548 file = shmem_file_setup(name, size, acctflag); 548 file = shmem_kernel_file_setup(name, size, acctflag);
549 } 549 }
550 error = PTR_ERR(file); 550 error = PTR_ERR(file);
551 if (IS_ERR(file)) 551 if (IS_ERR(file))
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 09c65640cad6..e85bdfd15fed 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1021,8 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
1021 * for strings that are too long, we should not have created 1021 * for strings that are too long, we should not have created
1022 * any. 1022 * any.
1023 */ 1023 */
1024 if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) { 1024 if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
1025 WARN_ON(1);
1026 send_sig(SIGKILL, current, 0); 1025 send_sig(SIGKILL, current, 0);
1027 return -1; 1026 return -1;
1028 } 1027 }
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9c9c9fab16cc..5644ec5582b9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,7 @@
21#include <linux/suspend.h> 21#include <linux/suspend.h>
22#include <linux/lockdep.h> 22#include <linux/lockdep.h>
23#include <linux/tick.h> 23#include <linux/tick.h>
24#include <linux/irq.h>
24#include <trace/events/power.h> 25#include <trace/events/power.h>
25 26
26#include "smpboot.h" 27#include "smpboot.h"
@@ -392,13 +393,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
392 smpboot_park_threads(cpu); 393 smpboot_park_threads(cpu);
393 394
394 /* 395 /*
395 * So now all preempt/rcu users must observe !cpu_active(). 396 * Prevent irq alloc/free while the dying cpu reorganizes the
397 * interrupt affinities.
396 */ 398 */
399 irq_lock_sparse();
397 400
401 /*
402 * So now all preempt/rcu users must observe !cpu_active().
403 */
398 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 404 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
399 if (err) { 405 if (err) {
400 /* CPU didn't die: tell everyone. Can't complain. */ 406 /* CPU didn't die: tell everyone. Can't complain. */
401 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); 407 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
408 irq_unlock_sparse();
402 goto out_release; 409 goto out_release;
403 } 410 }
404 BUG_ON(cpu_online(cpu)); 411 BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
415 smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */ 422 smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
416 per_cpu(cpu_dead_idle, cpu) = false; 423 per_cpu(cpu_dead_idle, cpu) = false;
417 424
425 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
426 irq_unlock_sparse();
427
418 hotplug_cpu__broadcast_tick_pull(cpu); 428 hotplug_cpu__broadcast_tick_pull(cpu);
419 /* This actually kills the CPU. */ 429 /* This actually kills the CPU. */
420 __cpu_die(cpu); 430 __cpu_die(cpu);
@@ -519,6 +529,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
519 529
520 /* Arch-specific enabling code. */ 530 /* Arch-specific enabling code. */
521 ret = __cpu_up(cpu, idle); 531 ret = __cpu_up(cpu, idle);
532
522 if (ret != 0) 533 if (ret != 0)
523 goto out_notify; 534 goto out_notify;
524 BUG_ON(!cpu_online(cpu)); 535 BUG_ON(!cpu_online(cpu));
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ee14e3a35a29..f0acff0f66c9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1223 spin_unlock_irq(&callback_lock); 1223 spin_unlock_irq(&callback_lock);
1224 1224
1225 /* use trialcs->mems_allowed as a temp variable */ 1225 /* use trialcs->mems_allowed as a temp variable */
1226 update_nodemasks_hier(cs, &cs->mems_allowed); 1226 update_nodemasks_hier(cs, &trialcs->mems_allowed);
1227done: 1227done:
1228 return retval; 1228 return retval;
1229} 1229}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e965cfae4207..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
1868 1868
1869 perf_pmu_disable(event->pmu); 1869 perf_pmu_disable(event->pmu);
1870 1870
1871 event->tstamp_running += tstamp - event->tstamp_stopped;
1872
1873 perf_set_shadow_time(event, ctx, tstamp); 1871 perf_set_shadow_time(event, ctx, tstamp);
1874 1872
1875 perf_log_itrace_start(event); 1873 perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
1881 goto out; 1879 goto out;
1882 } 1880 }
1883 1881
1882 event->tstamp_running += tstamp - event->tstamp_stopped;
1883
1884 if (!is_software_event(event)) 1884 if (!is_software_event(event))
1885 cpuctx->active_oncpu++; 1885 cpuctx->active_oncpu++;
1886 if (!ctx->nr_active++) 1886 if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
3958 perf_event_for_each_child(sibling, func); 3958 perf_event_for_each_child(sibling, func);
3959} 3959}
3960 3960
3961static int perf_event_period(struct perf_event *event, u64 __user *arg) 3961struct period_event {
3962{ 3962 struct perf_event *event;
3963 struct perf_event_context *ctx = event->ctx;
3964 int ret = 0, active;
3965 u64 value; 3963 u64 value;
3964};
3966 3965
3967 if (!is_sampling_event(event)) 3966static int __perf_event_period(void *info)
3968 return -EINVAL; 3967{
3969 3968 struct period_event *pe = info;
3970 if (copy_from_user(&value, arg, sizeof(value))) 3969 struct perf_event *event = pe->event;
3971 return -EFAULT; 3970 struct perf_event_context *ctx = event->ctx;
3972 3971 u64 value = pe->value;
3973 if (!value) 3972 bool active;
3974 return -EINVAL;
3975 3973
3976 raw_spin_lock_irq(&ctx->lock); 3974 raw_spin_lock(&ctx->lock);
3977 if (event->attr.freq) { 3975 if (event->attr.freq) {
3978 if (value > sysctl_perf_event_sample_rate) {
3979 ret = -EINVAL;
3980 goto unlock;
3981 }
3982
3983 event->attr.sample_freq = value; 3976 event->attr.sample_freq = value;
3984 } else { 3977 } else {
3985 event->attr.sample_period = value; 3978 event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
3998 event->pmu->start(event, PERF_EF_RELOAD); 3991 event->pmu->start(event, PERF_EF_RELOAD);
3999 perf_pmu_enable(ctx->pmu); 3992 perf_pmu_enable(ctx->pmu);
4000 } 3993 }
3994 raw_spin_unlock(&ctx->lock);
4001 3995
4002unlock: 3996 return 0;
3997}
3998
3999static int perf_event_period(struct perf_event *event, u64 __user *arg)
4000{
4001 struct period_event pe = { .event = event, };
4002 struct perf_event_context *ctx = event->ctx;
4003 struct task_struct *task;
4004 u64 value;
4005
4006 if (!is_sampling_event(event))
4007 return -EINVAL;
4008
4009 if (copy_from_user(&value, arg, sizeof(value)))
4010 return -EFAULT;
4011
4012 if (!value)
4013 return -EINVAL;
4014
4015 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4016 return -EINVAL;
4017
4018 task = ctx->task;
4019 pe.value = value;
4020
4021 if (!task) {
4022 cpu_function_call(event->cpu, __perf_event_period, &pe);
4023 return 0;
4024 }
4025
4026retry:
4027 if (!task_function_call(task, __perf_event_period, &pe))
4028 return 0;
4029
4030 raw_spin_lock_irq(&ctx->lock);
4031 if (ctx->is_active) {
4032 raw_spin_unlock_irq(&ctx->lock);
4033 task = ctx->task;
4034 goto retry;
4035 }
4036
4037 __perf_event_period(&pe);
4003 raw_spin_unlock_irq(&ctx->lock); 4038 raw_spin_unlock_irq(&ctx->lock);
4004 4039
4005 return ret; 4040 return 0;
4006} 4041}
4007 4042
4008static const struct file_operations perf_fops; 4043static const struct file_operations perf_fops;
@@ -4358,14 +4393,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
4358 rcu_read_unlock(); 4393 rcu_read_unlock();
4359} 4394}
4360 4395
4361static void rb_free_rcu(struct rcu_head *rcu_head)
4362{
4363 struct ring_buffer *rb;
4364
4365 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
4366 rb_free(rb);
4367}
4368
4369struct ring_buffer *ring_buffer_get(struct perf_event *event) 4396struct ring_buffer *ring_buffer_get(struct perf_event *event)
4370{ 4397{
4371 struct ring_buffer *rb; 4398 struct ring_buffer *rb;
@@ -4748,12 +4775,20 @@ static const struct file_operations perf_fops = {
4748 * to user-space before waking everybody up. 4775 * to user-space before waking everybody up.
4749 */ 4776 */
4750 4777
4778static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
4779{
4780 /* only the parent has fasync state */
4781 if (event->parent)
4782 event = event->parent;
4783 return &event->fasync;
4784}
4785
4751void perf_event_wakeup(struct perf_event *event) 4786void perf_event_wakeup(struct perf_event *event)
4752{ 4787{
4753 ring_buffer_wakeup(event); 4788 ring_buffer_wakeup(event);
4754 4789
4755 if (event->pending_kill) { 4790 if (event->pending_kill) {
4756 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4791 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
4757 event->pending_kill = 0; 4792 event->pending_kill = 0;
4758 } 4793 }
4759} 4794}
@@ -6132,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
6132 else 6167 else
6133 perf_event_output(event, data, regs); 6168 perf_event_output(event, data, regs);
6134 6169
6135 if (event->fasync && event->pending_kill) { 6170 if (*perf_event_fasync(event) && event->pending_kill) {
6136 event->pending_wakeup = 1; 6171 event->pending_wakeup = 1;
6137 irq_work_queue(&event->pending); 6172 irq_work_queue(&event->pending);
6138 } 6173 }
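
The perf_event_period() rework above keeps the user-visible behaviour of the ioctl; from userspace the update path is unchanged (perf_fd and the period value below are placeholders):

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <stdio.h>

	__u64 period = 100000;	/* new sample period */

	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period))
		perror("PERF_EVENT_IOC_PERIOD");
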
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 2deb24c7a40d..2bbad9c1274c 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -11,6 +11,7 @@
11struct ring_buffer { 11struct ring_buffer {
12 atomic_t refcount; 12 atomic_t refcount;
13 struct rcu_head rcu_head; 13 struct rcu_head rcu_head;
14 struct irq_work irq_work;
14#ifdef CONFIG_PERF_USE_VMALLOC 15#ifdef CONFIG_PERF_USE_VMALLOC
15 struct work_struct work; 16 struct work_struct work;
16 int page_order; /* allocation order */ 17 int page_order; /* allocation order */
@@ -55,6 +56,15 @@ struct ring_buffer {
55}; 56};
56 57
57extern void rb_free(struct ring_buffer *rb); 58extern void rb_free(struct ring_buffer *rb);
59
60static inline void rb_free_rcu(struct rcu_head *rcu_head)
61{
62 struct ring_buffer *rb;
63
64 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
65 rb_free(rb);
66}
67
58extern struct ring_buffer * 68extern struct ring_buffer *
59rb_alloc(int nr_pages, long watermark, int cpu, int flags); 69rb_alloc(int nr_pages, long watermark, int cpu, int flags);
60extern void perf_event_wakeup(struct perf_event *event); 70extern void perf_event_wakeup(struct perf_event *event);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 96472824a752..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
221 rcu_read_unlock(); 221 rcu_read_unlock();
222} 222}
223 223
224static void rb_irq_work(struct irq_work *work);
225
224static void 226static void
225ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) 227ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
226{ 228{
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
241 243
242 INIT_LIST_HEAD(&rb->event_list); 244 INIT_LIST_HEAD(&rb->event_list);
243 spin_lock_init(&rb->event_lock); 245 spin_lock_init(&rb->event_lock);
246 init_irq_work(&rb->irq_work, rb_irq_work);
247}
248
249static void ring_buffer_put_async(struct ring_buffer *rb)
250{
251 if (!atomic_dec_and_test(&rb->refcount))
252 return;
253
254 rb->rcu_head.next = (void *)rb;
255 irq_work_queue(&rb->irq_work);
244} 256}
245 257
246/* 258/*
@@ -319,7 +331,7 @@ err_put:
319 rb_free_aux(rb); 331 rb_free_aux(rb);
320 332
321err: 333err:
322 ring_buffer_put(rb); 334 ring_buffer_put_async(rb);
323 handle->event = NULL; 335 handle->event = NULL;
324 336
325 return NULL; 337 return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
370 382
371 local_set(&rb->aux_nest, 0); 383 local_set(&rb->aux_nest, 0);
372 rb_free_aux(rb); 384 rb_free_aux(rb);
373 ring_buffer_put(rb); 385 ring_buffer_put_async(rb);
374} 386}
375 387
376/* 388/*
@@ -547,17 +559,30 @@ static void __rb_free_aux(struct ring_buffer *rb)
547 rb->aux_priv = NULL; 559 rb->aux_priv = NULL;
548 } 560 }
549 561
550 for (pg = 0; pg < rb->aux_nr_pages; pg++) 562 if (rb->aux_nr_pages) {
551 rb_free_aux_page(rb, pg); 563 for (pg = 0; pg < rb->aux_nr_pages; pg++)
564 rb_free_aux_page(rb, pg);
552 565
553 kfree(rb->aux_pages); 566 kfree(rb->aux_pages);
554 rb->aux_nr_pages = 0; 567 rb->aux_nr_pages = 0;
568 }
555} 569}
556 570
557void rb_free_aux(struct ring_buffer *rb) 571void rb_free_aux(struct ring_buffer *rb)
558{ 572{
559 if (atomic_dec_and_test(&rb->aux_refcount)) 573 if (atomic_dec_and_test(&rb->aux_refcount))
574 irq_work_queue(&rb->irq_work);
575}
576
577static void rb_irq_work(struct irq_work *work)
578{
579 struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
580
581 if (!atomic_read(&rb->aux_refcount))
560 __rb_free_aux(rb); 582 __rb_free_aux(rb);
583
584 if (rb->rcu_head.next == (void *)rb)
585 call_rcu(&rb->rcu_head, rb_free_rcu);
561} 586}
562 587
563#ifndef CONFIG_PERF_USE_VMALLOC 588#ifndef CONFIG_PERF_USE_VMALLOC
diff --git a/kernel/fork.c b/kernel/fork.c
index 1bfefc6f96a4..dbd9b8d7b7cc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); 287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
288} 288}
289 289
290#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
291/* Initialized by the architecture: */
292int arch_task_struct_size __read_mostly;
293#endif
294
290void __init fork_init(void) 295void __init fork_init(void)
291{ 296{
292#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR 297#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
295#endif 300#endif
296 /* create a slab on which task_structs can be allocated */ 301 /* create a slab on which task_structs can be allocated */
297 task_struct_cachep = 302 task_struct_cachep =
298 kmem_cache_create("task_struct", sizeof(struct task_struct), 303 kmem_cache_create("task_struct", arch_task_struct_size,
299 ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL); 304 ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
300#endif 305#endif
301 306
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 27f4332c7f84..ae216824e8ca 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -985,6 +985,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
985} 985}
986 986
987/** 987/**
988 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
989 * @data: Pointer to interrupt specific data
990 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
991 *
992 * Conditional, as the underlying parent chip might not implement it.
993 */
994int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
995{
996 data = data->parent_data;
997
998 if (data->chip->irq_set_type)
999 return data->chip->irq_set_type(data, type);
1000
1001 return -ENOSYS;
1002}
1003
1004/**
988 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1005 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
989 * @data: Pointer to interrupt specific data 1006 * @data: Pointer to interrupt specific data
990 * 1007 *
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
997 if (data->chip && data->chip->irq_retrigger) 1014 if (data->chip && data->chip->irq_retrigger)
998 return data->chip->irq_retrigger(data); 1015 return data->chip->irq_retrigger(data);
999 1016
1000 return -ENOSYS; 1017 return 0;
1001} 1018}
1002 1019
1003/** 1020/**
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4834ee828c41..61008b8433ab 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
76 76
77#ifdef CONFIG_SPARSE_IRQ 77#ifdef CONFIG_SPARSE_IRQ
78static inline void irq_mark_irq(unsigned int irq) { } 78static inline void irq_mark_irq(unsigned int irq) { }
79extern void irq_lock_sparse(void);
80extern void irq_unlock_sparse(void);
81#else 79#else
82extern void irq_mark_irq(unsigned int irq); 80extern void irq_mark_irq(unsigned int irq);
83static inline void irq_lock_sparse(void) { }
84static inline void irq_unlock_sparse(void) { }
85#endif 81#endif
86 82
87extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 83extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 9065107f083e..7a5237a1bce5 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
75 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { 75 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
76#ifdef CONFIG_HARDIRQS_SW_RESEND 76#ifdef CONFIG_HARDIRQS_SW_RESEND
77 /* 77 /*
78 * If the interrupt has a parent irq and runs 78 * If the interrupt is running in the thread
79 * in the thread context of the parent irq, 79 * context of the parent irq we need to be
80 * retrigger the parent. 80 * careful, because we cannot trigger it
81 * directly.
81 */ 82 */
82 if (desc->parent_irq && 83 if (irq_settings_is_nested_thread(desc)) {
83 irq_settings_is_nested_thread(desc)) 84 /*
85 * If the parent_irq is valid, we
86 * retrigger the parent, otherwise we
87 * do nothing.
88 */
89 if (!desc->parent_irq)
90 return;
84 irq = desc->parent_irq; 91 irq = desc->parent_irq;
92 }
85 /* Set it pending and activate the softirq: */ 93 /* Set it pending and activate the softirq: */
86 set_bit(irq, irqs_resend); 94 set_bit(irq, irqs_resend);
87 tasklet_schedule(&resend_tasklet); 95 tasklet_schedule(&resend_tasklet);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
97{ 97{
98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); 98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
99} 99}
100EXPORT_SYMBOL_GPL(kthread_should_park);
100 101
101/** 102/**
102 * kthread_freezable_should_stop - should this freezable kthread return now? 103 * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
171{ 172{
172 __kthread_parkme(to_kthread(current)); 173 __kthread_parkme(to_kthread(current));
173} 174}
175EXPORT_SYMBOL_GPL(kthread_parkme);
174 176
175static int kthread(void *_create) 177static int kthread(void *_create)
176{ 178{
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
411 if (kthread) 413 if (kthread)
412 __kthread_unpark(k, kthread); 414 __kthread_unpark(k, kthread);
413} 415}
416EXPORT_SYMBOL_GPL(kthread_unpark);
414 417
415/** 418/**
416 * kthread_park - park a thread created by kthread_create(). 419 * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
441 } 444 }
442 return ret; 445 return ret;
443} 446}
447EXPORT_SYMBOL_GPL(kthread_park);
444 448
445/** 449/**
446 * kthread_stop - stop a thread created by kthread_create(). 450 * kthread_stop - stop a thread created by kthread_create().
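
With the park helpers exported above, a module can now quiesce a worker it created with kthread_run(); a hedged sketch (worker_task and the reprogramming step are placeholders):

	if (!kthread_park(worker_task)) {	/* returns 0 once the thread is parked */
		reprogram_hardware();		/* hypothetical critical section */
		kthread_unpark(worker_task);	/* let the worker run again */
	}
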
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/hash.h> 5#include <linux/hash.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7#include <linux/debug_locks.h>
7 8
8/* 9/*
9 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead 10 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
286{ 287{
287 struct __qspinlock *l = (void *)lock; 288 struct __qspinlock *l = (void *)lock;
288 struct pv_node *node; 289 struct pv_node *node;
290 u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
289 291
290 /* 292 /*
291 * We must not unlock if SLOW, because in that case we must first 293 * We must not unlock if SLOW, because in that case we must first
292 * unhash. Otherwise it would be possible to have multiple @lock 294 * unhash. Otherwise it would be possible to have multiple @lock
293 * entries, which would be BAD. 295 * entries, which would be BAD.
294 */ 296 */
295 if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL)) 297 if (likely(lockval == _Q_LOCKED_VAL))
296 return; 298 return;
297 299
300 if (unlikely(lockval != _Q_SLOW_VAL)) {
301 if (debug_locks_silent)
302 return;
303 WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
304 return;
305 }
306
298 /* 307 /*
299 * Since the above failed to release, this must be the SLOW path. 308 * Since the above failed to release, this must be the SLOW path.
300 * Therefore start by looking up the blocked node and unhashing it. 309 * Therefore start by looking up the blocked node and unhashing it.
diff --git a/kernel/module.c b/kernel/module.c
index 3e0e19763d24..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
602} 602}
603EXPORT_SYMBOL_GPL(find_symbol); 603EXPORT_SYMBOL_GPL(find_symbol);
604 604
605/* Search for module by name: must hold module_mutex. */ 605/*
606 * Search for module by name: must hold module_mutex (or preempt disabled
607 * for read-only access).
608 */
606static struct module *find_module_all(const char *name, size_t len, 609static struct module *find_module_all(const char *name, size_t len,
607 bool even_unformed) 610 bool even_unformed)
608{ 611{
609 struct module *mod; 612 struct module *mod;
610 613
611 module_assert_mutex(); 614 module_assert_mutex_or_preempt();
612 615
613 list_for_each_entry(mod, &modules, list) { 616 list_for_each_entry(mod, &modules, list) {
614 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 617 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
621 624
622struct module *find_module(const char *name) 625struct module *find_module(const char *name)
623{ 626{
627 module_assert_mutex();
624 return find_module_all(name, strlen(name), false); 628 return find_module_all(name, strlen(name), false);
625} 629}
626EXPORT_SYMBOL_GPL(find_module); 630EXPORT_SYMBOL_GPL(find_module);
@@ -3557,6 +3561,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
3557 mutex_lock(&module_mutex); 3561 mutex_lock(&module_mutex);
3558 /* Unlink carefully: kallsyms could be walking list. */ 3562 /* Unlink carefully: kallsyms could be walking list. */
3559 list_del_rcu(&mod->list); 3563 list_del_rcu(&mod->list);
3564 mod_tree_remove(mod);
3560 wake_up_all(&module_wq); 3565 wake_up_all(&module_wq);
3561 /* Wait for RCU-sched synchronizing before releasing mod->list. */ 3566 /* Wait for RCU-sched synchronizing before releasing mod->list. */
3562 synchronize_sched(); 3567 synchronize_sched();
diff --git a/kernel/resource.c b/kernel/resource.c
index 90552aab5f2d..fed052a1bc9f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
504{ 504{
505 struct resource *p; 505 struct resource *p;
506 resource_size_t end = start + size - 1; 506 resource_size_t end = start + size - 1;
507 int flags = IORESOURCE_MEM | IORESOURCE_BUSY; 507 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
508 const char *name = "System RAM"; 508 const char *name = "System RAM";
509 int ret = -1; 509 int ret = -1;
510 510
511 read_lock(&resource_lock); 511 read_lock(&resource_lock);
512 for (p = iomem_resource.child; p ; p = p->sibling) { 512 for (p = iomem_resource.child; p ; p = p->sibling) {
513 if (end < p->start) 513 if (p->end < start)
514 continue; 514 continue;
515 515
516 if (p->start <= start && end <= p->end) { 516 if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
521 ret = 1; 521 ret = 1;
522 break; 522 break;
523 } 523 }
524 if (p->end < start) 524 if (end < p->start)
525 break; /* not found */ 525 break; /* not found */
526 } 526 }
527 read_unlock(&resource_lock); 527 read_unlock(&resource_lock);
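An aside on the region_is_ram() fix above: the corrected hunk relies on the iomem_resource children being address-ordered, so the walk may skip ranges that end before the query and stop at the first range that starts beyond it. The standalone sketch below reproduces only that scan order with invented names (struct res, ram_state); it is an illustration of the interval walk, not the kernel function.

/*
 * Walk a sorted, non-overlapping range list and classify a query range.
 * Returns 1 if [start,end] lies fully inside a RAM range, 0 if it
 * intersects the map in any other way, -1 if nothing intersects it.
 */
#include <stdio.h>

struct res {
	unsigned long start, end;	/* inclusive bounds, like struct resource */
	int is_ram;
};

static int ram_state(const struct res *r, int n,
		     unsigned long start, unsigned long end)
{
	for (int i = 0; i < n; i++) {
		if (r[i].end < start)	/* range ends before the query: skip it */
			continue;
		if (end < r[i].start)	/* list is sorted: nothing further can match */
			break;
		if (r[i].start <= start && end <= r[i].end)
			return r[i].is_ram ? 1 : 0;
		return 0;		/* partial overlap */
	}
	return -1;
}

int main(void)
{
	struct res map[] = {
		{ 0x1000, 0x1fff, 1 },
		{ 0x3000, 0x3fff, 0 },
	};
	printf("%d\n", ram_state(map, 2, 0x1100, 0x11ff));	/* 1: inside RAM */
	printf("%d\n", ram_state(map, 2, 0x2000, 0x20ff));	/* -1: not covered */
	return 0;
}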
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 65c8f3ebdc3c..d113c3ba8bc4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
3683 cfs_rq->throttled = 1; 3683 cfs_rq->throttled = 1;
3684 cfs_rq->throttled_clock = rq_clock(rq); 3684 cfs_rq->throttled_clock = rq_clock(rq);
3685 raw_spin_lock(&cfs_b->lock); 3685 raw_spin_lock(&cfs_b->lock);
3686 empty = list_empty(&cfs_rq->throttled_list); 3686 empty = list_empty(&cfs_b->throttled_cfs_rq);
3687 3687
3688 /* 3688 /*
3689 * Add to the _head_ of the list, so that an already-started 3689 * Add to the _head_ of the list, so that an already-started
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2748 * Other callers might not initialize the si_lsb field, 2748 * Other callers might not initialize the si_lsb field,
2749 * so check explicitly for the right codes here. 2749 * so check explicitly for the right codes here.
2750 */ 2750 */
2751 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 2751 if (from->si_signo == SIGBUS &&
2752 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2752 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2753 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2753#endif 2754#endif
2754#ifdef SEGV_BNDERR 2755#ifdef SEGV_BNDERR
2755 err |= __put_user(from->si_lower, &to->si_lower); 2756 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2756 err |= __put_user(from->si_upper, &to->si_upper); 2757 err |= __put_user(from->si_lower, &to->si_lower);
2758 err |= __put_user(from->si_upper, &to->si_upper);
2759 }
2757#endif 2760#endif
2758 break; 2761 break;
2759 case __SI_CHLD: 2762 case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3017 int, sig, 3020 int, sig,
3018 struct compat_siginfo __user *, uinfo) 3021 struct compat_siginfo __user *, uinfo)
3019{ 3022{
3020 siginfo_t info; 3023 siginfo_t info = {};
3021 int ret = copy_siginfo_from_user32(&info, uinfo); 3024 int ret = copy_siginfo_from_user32(&info, uinfo);
3022 if (unlikely(ret)) 3025 if (unlikely(ret))
3023 return ret; 3026 return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3061 int, sig, 3064 int, sig,
3062 struct compat_siginfo __user *, uinfo) 3065 struct compat_siginfo __user *, uinfo)
3063{ 3066{
3064 siginfo_t info; 3067 siginfo_t info = {};
3065 3068
3066 if (copy_siginfo_from_user32(&info, uinfo)) 3069 if (copy_siginfo_from_user32(&info, uinfo))
3067 return -EFAULT; 3070 return -EFAULT;
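Two things happen in the signal.c hunk: the compat entry points now zero-initialize the siginfo they build ("siginfo_t info = {};"), and copy_siginfo_to_user() only copies union members that the (si_signo, si_code) pair actually defines. A minimal userspace sketch of both ideas, using made-up types and constants (fake_siginfo, FAKE_SIGBUS), not the real siginfo layout:

#include <stdio.h>
#include <string.h>

struct fake_siginfo {
	int signo;
	int code;
	union {
		struct { void *addr; short addr_lsb; } fault;	/* bus/segv style */
		struct { int pid; int status; } child;		/* child style */
	} u;
};

#define FAKE_SIGBUS	7
#define FAKE_MCEERR_AO	5	/* illustrative code value */

int main(void)
{
	/* Zero initializer: unused members read as 0, not stale stack data.
	 * The kernel hunk spells this "siginfo_t info = {};". */
	struct fake_siginfo info = { 0 };
	char out_buf[sizeof(info)];

	info.signo = FAKE_SIGBUS;
	info.code = FAKE_MCEERR_AO;
	info.u.fault.addr_lsb = 12;

	/* Only interpret addr_lsb for the signo/code pairs that define it. */
	if (info.signo == FAKE_SIGBUS && info.code == FAKE_MCEERR_AO)
		printf("addr_lsb=%d\n", info.u.fault.addr_lsb);

	memcpy(out_buf, &info, sizeof(info));	/* whole struct is copied out */
	return 0;
}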
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 08ccc3da3ca0..50eb107f1198 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
120 /* The clockevent device is getting replaced. Shut it down. */ 120 /* The clockevent device is getting replaced. Shut it down. */
121 121
122 case CLOCK_EVT_STATE_SHUTDOWN: 122 case CLOCK_EVT_STATE_SHUTDOWN:
123 return dev->set_state_shutdown(dev); 123 if (dev->set_state_shutdown)
124 return dev->set_state_shutdown(dev);
125 return 0;
124 126
125 case CLOCK_EVT_STATE_PERIODIC: 127 case CLOCK_EVT_STATE_PERIODIC:
126 /* Core internal bug */ 128 /* Core internal bug */
127 if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) 129 if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
128 return -ENOSYS; 130 return -ENOSYS;
129 return dev->set_state_periodic(dev); 131 if (dev->set_state_periodic)
132 return dev->set_state_periodic(dev);
133 return 0;
130 134
131 case CLOCK_EVT_STATE_ONESHOT: 135 case CLOCK_EVT_STATE_ONESHOT:
132 /* Core internal bug */ 136 /* Core internal bug */
133 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 137 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
134 return -ENOSYS; 138 return -ENOSYS;
135 return dev->set_state_oneshot(dev); 139 if (dev->set_state_oneshot)
140 return dev->set_state_oneshot(dev);
141 return 0;
136 142
137 case CLOCK_EVT_STATE_ONESHOT_STOPPED: 143 case CLOCK_EVT_STATE_ONESHOT_STOPPED:
138 /* Core internal bug */ 144 /* Core internal bug */
@@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
471 if (dev->features & CLOCK_EVT_FEAT_DUMMY) 477 if (dev->features & CLOCK_EVT_FEAT_DUMMY)
472 return 0; 478 return 0;
473 479
474 /* New state-specific callbacks */
475 if (!dev->set_state_shutdown)
476 return -EINVAL;
477
478 if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
479 !dev->set_state_periodic)
480 return -EINVAL;
481
482 if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
483 !dev->set_state_oneshot)
484 return -EINVAL;
485
486 return 0; 480 return 0;
487} 481}
488 482
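The clockevents hunk makes the per-state callbacks optional: instead of rejecting a device that lacks set_state_shutdown/periodic/oneshot at registration time, the state switch treats a NULL pointer as a no-op that succeeds. A small sketch of that pattern, with an invented fake_dev type:

#include <stdio.h>

struct fake_dev {
	const char *name;
	int (*set_state_shutdown)(struct fake_dev *dev);
};

static int switch_to_shutdown(struct fake_dev *dev)
{
	if (dev->set_state_shutdown)
		return dev->set_state_shutdown(dev);
	return 0;	/* callback is optional: absence means "nothing to do" */
}

static int real_shutdown(struct fake_dev *dev)
{
	printf("%s: shut down\n", dev->name);
	return 0;
}

int main(void)
{
	struct fake_dev a = { "with-callback", real_shutdown };
	struct fake_dev b = { "without-callback", NULL };

	printf("%d %d\n", switch_to_shutdown(&a), switch_to_shutdown(&b));
	return 0;
}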
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d39f32cdd1b5..f6aae7977824 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
159{ 159{
160 struct clock_event_device *bc = tick_broadcast_device.evtdev; 160 struct clock_event_device *bc = tick_broadcast_device.evtdev;
161 unsigned long flags; 161 unsigned long flags;
162 int ret; 162 int ret = 0;
163 163
164 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 164 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
165 165
@@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
221 * If we kept the cpu in the broadcast mask, 221 * If we kept the cpu in the broadcast mask,
222 * tell the caller to leave the per cpu device 222 * tell the caller to leave the per cpu device
223 * in shutdown state. The periodic interrupt 223 * in shutdown state. The periodic interrupt
224 * is delivered by the broadcast device. 224 * is delivered by the broadcast device, if
225 * the broadcast device exists and is not
226 * hrtimer based.
225 */ 227 */
226 ret = cpumask_test_cpu(cpu, tick_broadcast_mask); 228 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
229 ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
227 break; 230 break;
228 default: 231 default:
229 /* Nothing to do */
230 ret = 0;
231 break; 232 break;
232 } 233 }
233 } 234 }
@@ -265,8 +266,22 @@ static bool tick_do_broadcast(struct cpumask *mask)
265 * Check, if the current cpu is in the mask 266 * Check, if the current cpu is in the mask
266 */ 267 */
267 if (cpumask_test_cpu(cpu, mask)) { 268 if (cpumask_test_cpu(cpu, mask)) {
269 struct clock_event_device *bc = tick_broadcast_device.evtdev;
270
268 cpumask_clear_cpu(cpu, mask); 271 cpumask_clear_cpu(cpu, mask);
269 local = true; 272 /*
273 * We only run the local handler, if the broadcast
274 * device is not hrtimer based. Otherwise we run into
275 * a hrtimer recursion.
276 *
277 * local timer_interrupt()
278 * local_handler()
279 * expire_hrtimers()
280 * bc_handler()
281 * local_handler()
282 * expire_hrtimers()
283 */
284 local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
270 } 285 }
271 286
272 if (!cpumask_empty(mask)) { 287 if (!cpumask_empty(mask)) {
@@ -301,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
301 bool bc_local; 316 bool bc_local;
302 317
303 raw_spin_lock(&tick_broadcast_lock); 318 raw_spin_lock(&tick_broadcast_lock);
319
320 /* Handle spurious interrupts gracefully */
321 if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
322 raw_spin_unlock(&tick_broadcast_lock);
323 return;
324 }
325
304 bc_local = tick_do_periodic_broadcast(); 326 bc_local = tick_do_periodic_broadcast();
305 327
306 if (clockevent_state_oneshot(dev)) { 328 if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
359 case TICK_BROADCAST_ON: 381 case TICK_BROADCAST_ON:
360 cpumask_set_cpu(cpu, tick_broadcast_on); 382 cpumask_set_cpu(cpu, tick_broadcast_on);
361 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) { 383 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
362 if (tick_broadcast_device.mode == 384 /*
363 TICKDEV_MODE_PERIODIC) 385 * Only shutdown the cpu local device, if:
386 *
387 * - the broadcast device exists
388 * - the broadcast device is not a hrtimer based one
389 * - the broadcast device is in periodic mode to
 390 * avoid a hiccup during switch to oneshot mode
391 */
392 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
393 tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
364 clockevents_shutdown(dev); 394 clockevents_shutdown(dev);
365 } 395 }
366 break; 396 break;
@@ -379,14 +409,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
379 break; 409 break;
380 } 410 }
381 411
382 if (cpumask_empty(tick_broadcast_mask)) { 412 if (bc) {
383 if (!bc_stopped) 413 if (cpumask_empty(tick_broadcast_mask)) {
384 clockevents_shutdown(bc); 414 if (!bc_stopped)
385 } else if (bc_stopped) { 415 clockevents_shutdown(bc);
386 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 416 } else if (bc_stopped) {
387 tick_broadcast_start_periodic(bc); 417 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
388 else 418 tick_broadcast_start_periodic(bc);
389 tick_broadcast_setup_oneshot(bc); 419 else
420 tick_broadcast_setup_oneshot(bc);
421 }
390 } 422 }
391 raw_spin_unlock(&tick_broadcast_lock); 423 raw_spin_unlock(&tick_broadcast_lock);
392} 424}
@@ -662,71 +694,82 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
662 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 694 clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
663} 695}
664 696
665/** 697int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
666 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
667 * @state: The target state (enter/exit)
668 *
669 * The system enters/leaves a state, where affected devices might stop
670 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
671 *
672 * Called with interrupts disabled, so clockevents_lock is not
673 * required here because the local clock event device cannot go away
674 * under us.
675 */
676int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
677{ 698{
678 struct clock_event_device *bc, *dev; 699 struct clock_event_device *bc, *dev;
679 struct tick_device *td;
680 int cpu, ret = 0; 700 int cpu, ret = 0;
681 ktime_t now; 701 ktime_t now;
682 702
683 /* 703 /*
684 * Periodic mode does not care about the enter/exit of power 704 * If there is no broadcast device, tell the caller not to go
685 * states 705 * into deep idle.
686 */ 706 */
687 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) 707 if (!tick_broadcast_device.evtdev)
688 return 0; 708 return -EBUSY;
689 709
690 /* 710 dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
691 * We are called with preemtion disabled from the depth of the
692 * idle code, so we can't be moved away.
693 */
694 td = this_cpu_ptr(&tick_cpu_device);
695 dev = td->evtdev;
696
697 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
698 return 0;
699 711
700 raw_spin_lock(&tick_broadcast_lock); 712 raw_spin_lock(&tick_broadcast_lock);
701 bc = tick_broadcast_device.evtdev; 713 bc = tick_broadcast_device.evtdev;
702 cpu = smp_processor_id(); 714 cpu = smp_processor_id();
703 715
704 if (state == TICK_BROADCAST_ENTER) { 716 if (state == TICK_BROADCAST_ENTER) {
717 /*
718 * If the current CPU owns the hrtimer broadcast
719 * mechanism, it cannot go deep idle and we do not add
720 * the CPU to the broadcast mask. We don't have to go
721 * through the EXIT path as the local timer is not
722 * shutdown.
723 */
724 ret = broadcast_needs_cpu(bc, cpu);
725 if (ret)
726 goto out;
727
728 /*
729 * If the broadcast device is in periodic mode, we
730 * return.
731 */
732 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
733 /* If it is a hrtimer based broadcast, return busy */
734 if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
735 ret = -EBUSY;
736 goto out;
737 }
738
705 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) { 739 if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
706 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask)); 740 WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
741
742 /* Conditionally shut down the local timer. */
707 broadcast_shutdown_local(bc, dev); 743 broadcast_shutdown_local(bc, dev);
744
708 /* 745 /*
709 * We only reprogram the broadcast timer if we 746 * We only reprogram the broadcast timer if we
710 * did not mark ourself in the force mask and 747 * did not mark ourself in the force mask and
711 * if the cpu local event is earlier than the 748 * if the cpu local event is earlier than the
712 * broadcast event. If the current CPU is in 749 * broadcast event. If the current CPU is in
713 * the force mask, then we are going to be 750 * the force mask, then we are going to be
714 * woken by the IPI right away. 751 * woken by the IPI right away; we return
752 * busy, so the CPU does not try to go deep
753 * idle.
715 */ 754 */
716 if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) && 755 if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
717 dev->next_event.tv64 < bc->next_event.tv64) 756 ret = -EBUSY;
757 } else if (dev->next_event.tv64 < bc->next_event.tv64) {
718 tick_broadcast_set_event(bc, cpu, dev->next_event); 758 tick_broadcast_set_event(bc, cpu, dev->next_event);
759 /*
760 * In case of hrtimer broadcasts the
761 * programming might have moved the
762 * timer to this cpu. If yes, remove
763 * us from the broadcast mask and
764 * return busy.
765 */
766 ret = broadcast_needs_cpu(bc, cpu);
767 if (ret) {
768 cpumask_clear_cpu(cpu,
769 tick_broadcast_oneshot_mask);
770 }
771 }
719 } 772 }
720 /*
721 * If the current CPU owns the hrtimer broadcast
722 * mechanism, it cannot go deep idle and we remove the
723 * CPU from the broadcast mask. We don't have to go
724 * through the EXIT path as the local timer is not
725 * shutdown.
726 */
727 ret = broadcast_needs_cpu(bc, cpu);
728 if (ret)
729 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
730 } else { 773 } else {
731 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) { 774 if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
732 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); 775 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -796,7 +839,6 @@ out:
796 raw_spin_unlock(&tick_broadcast_lock); 839 raw_spin_unlock(&tick_broadcast_lock);
797 return ret; 840 return ret;
798} 841}
799EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
800 842
801/* 843/*
802 * Reset the one shot broadcast for a cpu 844 * Reset the one shot broadcast for a cpu
@@ -938,6 +980,16 @@ bool tick_broadcast_oneshot_available(void)
938 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; 980 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
939} 981}
940 982
983#else
984int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
985{
986 struct clock_event_device *bc = tick_broadcast_device.evtdev;
987
988 if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
989 return -EBUSY;
990
991 return 0;
992}
941#endif 993#endif
942 994
943void __init tick_broadcast_init(void) 995void __init tick_broadcast_init(void)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 76446cb5dfe1..f8bf47571dda 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -343,6 +343,28 @@ out_bc:
343 tick_install_broadcast_device(newdev); 343 tick_install_broadcast_device(newdev);
344} 344}
345 345
346/**
347 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
348 * @state: The target state (enter/exit)
349 *
350 * The system enters/leaves a state, where affected devices might stop
351 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
352 *
353 * Called with interrupts disabled, so clockevents_lock is not
354 * required here because the local clock event device cannot go away
355 * under us.
356 */
357int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
358{
359 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
360
361 if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
362 return 0;
363
364 return __tick_broadcast_oneshot_control(state);
365}
366EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
367
346#ifdef CONFIG_HOTPLUG_CPU 368#ifdef CONFIG_HOTPLUG_CPU
347/* 369/*
348 * Transfer the do_timer job away from a dying cpu. 370 * Transfer the do_timer job away from a dying cpu.
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 42fdf4958bcc..a4a8d4e9baa1 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu);
71static inline void tick_cancel_sched_timer(int cpu) { } 71static inline void tick_cancel_sched_timer(int cpu) { }
72#endif 72#endif
73 73
74#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
75extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
76#else
77static inline int
78__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
79{
80 return -EBUSY;
81}
82#endif
83
74#endif 84#endif
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5e097fa9faf7..84190f02b521 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
807 spin_unlock(&base->lock); 807 spin_unlock(&base->lock);
808 base = new_base; 808 base = new_base;
809 spin_lock(&base->lock); 809 spin_lock(&base->lock);
810 timer->flags &= ~TIMER_BASEMASK; 810 WRITE_ONCE(timer->flags,
811 timer->flags |= base->cpu; 811 (timer->flags & ~TIMER_BASEMASK) | base->cpu);
812 } 812 }
813 } 813 }
814 814
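On the timer.c change: the old code cleared the base bits and then set the new CPU in two separate statements, so a lockless reader could observe the intermediate value with the base bits zeroed; the fix publishes the whole new flags word with a single WRITE_ONCE(). The sketch below models the same idea with a C11 relaxed atomic store and an illustrative BASEMASK:

#include <stdatomic.h>
#include <stdio.h>

#define BASEMASK 0xffu	/* low bits encode the owning base/CPU in this sketch */

static _Atomic unsigned int timer_flags = 0x305;	/* flags 0x300, base 5 */

static void move_to_base(unsigned int new_cpu)
{
	unsigned int flags = atomic_load_explicit(&timer_flags,
						  memory_order_relaxed);
	/* One store: readers see either the old or the new base, never a
	 * half-updated value with the base bits cleared. */
	atomic_store_explicit(&timer_flags,
			      (flags & ~BASEMASK) | new_cpu,
			      memory_order_relaxed);
}

int main(void)
{
	move_to_base(2);
	printf("%#x\n", (unsigned int)atomic_load(&timer_flags));	/* 0x302 */
	return 0;
}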
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
98 struct pid *pid; 98 struct pid *pid;
99}; 99};
100 100
101static bool ftrace_pids_enabled(void)
102{
103 return !list_empty(&ftrace_pids);
104}
105
106static void ftrace_update_trampoline(struct ftrace_ops *ops);
107
101/* 108/*
102 * ftrace_disabled is set when an anomaly is discovered. 109 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled. 110 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
109static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 116static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 117static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 118ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113static struct ftrace_ops global_ops; 119static struct ftrace_ops global_ops;
114static struct ftrace_ops control_ops; 120static struct ftrace_ops control_ops;
115 121
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
183 if (!test_tsk_trace_trace(current)) 189 if (!test_tsk_trace_trace(current))
184 return; 190 return;
185 191
186 ftrace_pid_function(ip, parent_ip, op, regs); 192 op->saved_func(ip, parent_ip, op, regs);
187}
188
189static void set_ftrace_pid_function(ftrace_func_t func)
190{
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194} 193}
195 194
196/** 195/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
202void clear_ftrace_function(void) 201void clear_ftrace_function(void)
203{ 202{
204 ftrace_trace_function = ftrace_stub; 203 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206} 204}
207 205
208static void control_ops_disable_all(struct ftrace_ops *ops) 206static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
436 } else 434 } else
437 add_ftrace_ops(&ftrace_ops_list, ops); 435 add_ftrace_ops(&ftrace_ops_list, ops);
438 436
437 /* Always save the function, and reset at unregistering */
438 ops->saved_func = ops->func;
439
440 if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
441 ops->func = ftrace_pid_func;
442
439 ftrace_update_trampoline(ops); 443 ftrace_update_trampoline(ops);
440 444
441 if (ftrace_enabled) 445 if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
463 if (ftrace_enabled) 467 if (ftrace_enabled)
464 update_ftrace_function(); 468 update_ftrace_function();
465 469
470 ops->func = ops->saved_func;
471
466 return 0; 472 return 0;
467} 473}
468 474
469static void ftrace_update_pid_func(void) 475static void ftrace_update_pid_func(void)
470{ 476{
477 bool enabled = ftrace_pids_enabled();
478 struct ftrace_ops *op;
479
471 /* Only do something if we are tracing something */ 480 /* Only do something if we are tracing something */
472 if (ftrace_trace_function == ftrace_stub) 481 if (ftrace_trace_function == ftrace_stub)
473 return; 482 return;
474 483
484 do_for_each_ftrace_op(op, ftrace_ops_list) {
485 if (op->flags & FTRACE_OPS_FL_PID) {
486 op->func = enabled ? ftrace_pid_func :
487 op->saved_func;
488 ftrace_update_trampoline(op);
489 }
490 } while_for_each_ftrace_op(op);
491
475 update_ftrace_function(); 492 update_ftrace_function();
476} 493}
477 494
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
1133 .local_hash.filter_hash = EMPTY_HASH, 1150 .local_hash.filter_hash = EMPTY_HASH,
1134 INIT_OPS_HASH(global_ops) 1151 INIT_OPS_HASH(global_ops)
1135 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 1152 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1136 FTRACE_OPS_FL_INITIALIZED, 1153 FTRACE_OPS_FL_INITIALIZED |
1154 FTRACE_OPS_FL_PID,
1137}; 1155};
1138 1156
1139/* 1157/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
5023 5041
5024static struct ftrace_ops global_ops = { 5042static struct ftrace_ops global_ops = {
5025 .func = ftrace_stub, 5043 .func = ftrace_stub,
5026 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 5044 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5045 FTRACE_OPS_FL_INITIALIZED |
5046 FTRACE_OPS_FL_PID,
5027}; 5047};
5028 5048
5029static int __init ftrace_nodyn_init(void) 5049static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5080 if (WARN_ON(tr->ops->func != ftrace_stub)) 5100 if (WARN_ON(tr->ops->func != ftrace_stub))
5081 printk("ftrace ops had %pS for function\n", 5101 printk("ftrace ops had %pS for function\n",
5082 tr->ops->func); 5102 tr->ops->func);
5083 /* Only the top level instance does pid tracing */
5084 if (!list_empty(&ftrace_pids)) {
5085 set_ftrace_pid_function(func);
5086 func = ftrace_pid_func;
5087 }
5088 } 5103 }
5089 tr->ops->func = func; 5104 tr->ops->func = func;
5090 tr->ops->private = tr; 5105 tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
5371{ 5386{
5372 mutex_lock(&ftrace_lock); 5387 mutex_lock(&ftrace_lock);
5373 5388
5374 if (list_empty(&ftrace_pids) && (!*pos)) 5389 if (!ftrace_pids_enabled() && (!*pos))
5375 return (void *) 1; 5390 return (void *) 1;
5376 5391
5377 return seq_list_start(&ftrace_pids, *pos); 5392 return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
5610 .func = ftrace_stub, 5625 .func = ftrace_stub,
5611 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5626 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5612 FTRACE_OPS_FL_INITIALIZED | 5627 FTRACE_OPS_FL_INITIALIZED |
5628 FTRACE_OPS_FL_PID |
5613 FTRACE_OPS_FL_STUB, 5629 FTRACE_OPS_FL_STUB,
5614#ifdef FTRACE_GRAPH_TRAMP_ADDR 5630#ifdef FTRACE_GRAPH_TRAMP_ADDR
5615 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 5631 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
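The ftrace changes replace the single global ftrace_pid_function with a per-ops saved_func: every ops carrying FTRACE_OPS_FL_PID keeps its registered handler in saved_func and gets ftrace_pid_func installed in func while pid filtering is active, then has the original restored when filtering is turned off or the ops is unregistered. A simplified stand-in for that save/override pattern (fake_ops and the handlers are invented here, not the ftrace API):

#include <stdbool.h>
#include <stdio.h>

struct fake_ops;
typedef void (*handler_t)(struct fake_ops *ops, unsigned long ip);

struct fake_ops {
	handler_t func;		/* what the tracer actually calls */
	handler_t saved_func;	/* the handler the user registered */
	int filter_pid;
};

static int current_pid = 42;

static void real_handler(struct fake_ops *ops, unsigned long ip)
{
	(void)ops;
	printf("hit %#lx\n", ip);
}

static void pid_filter_handler(struct fake_ops *ops, unsigned long ip)
{
	if (current_pid != ops->filter_pid)
		return;			/* drop events from other tasks */
	ops->saved_func(ops, ip);	/* then run the original handler */
}

static void set_pid_filtering(struct fake_ops *ops, bool on)
{
	ops->func = on ? pid_filter_handler : ops->saved_func;
}

int main(void)
{
	struct fake_ops ops = { real_handler, real_handler, 42 };

	set_pid_filtering(&ops, true);
	ops.func(&ops, 0x1234);		/* printed: pid matches the filter */
	ops.filter_pid = 7;
	ops.func(&ops, 0x5678);		/* silently filtered out */
	set_pid_filtering(&ops, false);	/* original handler restored */
	return 0;
}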
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f060716b02ae..74bde81601a9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -444,6 +444,7 @@ enum {
444 444
445 TRACE_CONTROL_BIT, 445 TRACE_CONTROL_BIT,
446 446
447 TRACE_BRANCH_BIT,
447/* 448/*
448 * Abuse of the trace_recursion. 449 * Abuse of the trace_recursion.
449 * As we need a way to maintain state if we are tracing the function 450 * As we need a way to maintain state if we are tracing the function
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index a87b43f49eb4..e2e12ad3186f 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
36 struct trace_branch *entry; 36 struct trace_branch *entry;
37 struct ring_buffer *buffer; 37 struct ring_buffer *buffer;
38 unsigned long flags; 38 unsigned long flags;
39 int cpu, pc; 39 int pc;
40 const char *p; 40 const char *p;
41 41
42 if (current->trace_recursion & TRACE_BRANCH_BIT)
43 return;
44
42 /* 45 /*
43 * I would love to save just the ftrace_likely_data pointer, but 46 * I would love to save just the ftrace_likely_data pointer, but
44 * this code can also be used by modules. Ugly things can happen 47 * this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
49 if (unlikely(!tr)) 52 if (unlikely(!tr))
50 return; 53 return;
51 54
52 local_irq_save(flags); 55 raw_local_irq_save(flags);
53 cpu = raw_smp_processor_id(); 56 current->trace_recursion |= TRACE_BRANCH_BIT;
54 data = per_cpu_ptr(tr->trace_buffer.data, cpu); 57 data = this_cpu_ptr(tr->trace_buffer.data);
55 if (atomic_inc_return(&data->disabled) != 1) 58 if (atomic_read(&data->disabled))
56 goto out; 59 goto out;
57 60
58 pc = preempt_count(); 61 pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
81 __buffer_unlock_commit(buffer, event); 84 __buffer_unlock_commit(buffer, event);
82 85
83 out: 86 out:
84 atomic_dec(&data->disabled); 87 current->trace_recursion &= ~TRACE_BRANCH_BIT;
85 local_irq_restore(flags); 88 raw_local_irq_restore(flags);
86} 89}
87 90
88static inline 91static inline
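The branch-tracer hunk swaps the per-cpu disabled counter for a recursion bit in current->trace_recursion, so a probe fired from within the probe itself bails out before touching the ring buffer again. A thread-local flag gives the same shape in plain C (names invented for the sketch):

#include <stdio.h>

static _Thread_local unsigned long trace_recursion;
#define BRANCH_BIT (1UL << 0)

static void probe(const char *what)
{
	if (trace_recursion & BRANCH_BIT)
		return;			/* already inside the probe: bail out */
	trace_recursion |= BRANCH_BIT;

	printf("record %s\n", what);
	probe("nested");		/* any re-entry returns immediately above */

	trace_recursion &= ~BRANCH_BIT;
}

int main(void)
{
	probe("outer");			/* prints only "record outer" */
	return 0;
}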
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 777eda7d1ab4..39f24d6721e5 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -18,10 +18,6 @@ config KASAN
18 For better error detection enable CONFIG_STACKTRACE, 18 For better error detection enable CONFIG_STACKTRACE,
19 and add slub_debug=U to boot cmdline. 19 and add slub_debug=U to boot cmdline.
20 20
21config KASAN_SHADOW_OFFSET
22 hex
23 default 0xdffffc0000000000 if X86_64
24
25choice 21choice
26 prompt "Instrumentation type" 22 prompt "Instrumentation type"
27 depends on KASAN 23 depends on KASAN
diff --git a/lib/decompress.c b/lib/decompress.c
index 528ff932d8e4..62696dff5730 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
59{ 59{
60 const struct compress_format *cf; 60 const struct compress_format *cf;
61 61
62 if (len < 2) 62 if (len < 2) {
63 if (name)
64 *name = NULL;
63 return NULL; /* Need at least this much... */ 65 return NULL; /* Need at least this much... */
66 }
64 67
65 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]); 68 pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
66 69
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index ae4b65e17e64..dace71fe41f7 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
574 unsigned long flags; 574 unsigned long flags;
575 phys_addr_t cln; 575 phys_addr_t cln;
576 576
577 if (dma_debug_disabled())
578 return;
579
577 if (!page) 580 if (!page)
578 return; 581 return;
579 582
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 7ea09699855d..8d74c20d8595 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -11,6 +11,7 @@
11#include <linux/ctype.h> 11#include <linux/ctype.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/export.h> 13#include <linux/export.h>
14#include <asm/unaligned.h>
14 15
15const char hex_asc[] = "0123456789abcdef"; 16const char hex_asc[] = "0123456789abcdef";
16EXPORT_SYMBOL(hex_asc); 17EXPORT_SYMBOL(hex_asc);
@@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
139 for (j = 0; j < ngroups; j++) { 140 for (j = 0; j < ngroups; j++) {
140 ret = snprintf(linebuf + lx, linebuflen - lx, 141 ret = snprintf(linebuf + lx, linebuflen - lx,
141 "%s%16.16llx", j ? " " : "", 142 "%s%16.16llx", j ? " " : "",
142 (unsigned long long)*(ptr8 + j)); 143 get_unaligned(ptr8 + j));
143 if (ret >= linebuflen - lx) 144 if (ret >= linebuflen - lx)
144 goto overflow1; 145 goto overflow1;
145 lx += ret; 146 lx += ret;
@@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
150 for (j = 0; j < ngroups; j++) { 151 for (j = 0; j < ngroups; j++) {
151 ret = snprintf(linebuf + lx, linebuflen - lx, 152 ret = snprintf(linebuf + lx, linebuflen - lx,
152 "%s%8.8x", j ? " " : "", 153 "%s%8.8x", j ? " " : "",
153 *(ptr4 + j)); 154 get_unaligned(ptr4 + j));
154 if (ret >= linebuflen - lx) 155 if (ret >= linebuflen - lx)
155 goto overflow1; 156 goto overflow1;
156 lx += ret; 157 lx += ret;
@@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
161 for (j = 0; j < ngroups; j++) { 162 for (j = 0; j < ngroups; j++) {
162 ret = snprintf(linebuf + lx, linebuflen - lx, 163 ret = snprintf(linebuf + lx, linebuflen - lx,
163 "%s%4.4x", j ? " " : "", 164 "%s%4.4x", j ? " " : "",
164 *(ptr2 + j)); 165 get_unaligned(ptr2 + j));
165 if (ret >= linebuflen - lx) 166 if (ret >= linebuflen - lx)
166 goto overflow1; 167 goto overflow1;
167 lx += ret; 168 lx += ret;
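The hexdump change reads the group values through get_unaligned() because the caller's buffer may not be naturally aligned for u64/u32/u16 loads. In portable userspace C the same effect is a memcpy() into a local, as sketched below (get_unaligned_u32 is an invented helper name):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));	/* never dereferences a misaligned pointer */
	return v;
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x44, 0x33, 0x22, 0x11, 0, 0, 0 };

	/* buf + 1 is only byte-aligned; *(uint32_t *)(buf + 1) would be
	 * undefined behaviour and can fault on strict-alignment CPUs. */
	printf("%#x\n", get_unaligned_u32(buf + 1));	/* 0x11223344 on LE */
	return 0;
}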
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
119 unsigned long align_mask = 0; 119 unsigned long align_mask = 0;
120 120
121 if (align_order > 0) 121 if (align_order > 0)
122 align_mask = 0xffffffffffffffffl >> (64 - align_order); 122 align_mask = ~0ul >> (BITS_PER_LONG - align_order);
123 123
124 /* Sanity check */ 124 /* Sanity check */
125 if (unlikely(npages == 0)) { 125 if (unlikely(npages == 0)) {
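The iommu-common fix derives the all-ones constant from the native word size instead of hard-coding a 64-bit literal, keeping the shift count in range on 32-bit builds as well. Roughly, under the same assumption of a power-of-two alignment order:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long align_mask(unsigned int align_order)
{
	if (align_order == 0)
		return 0;
	/* low align_order bits set, e.g. order 3 -> 0x7 */
	return ~0UL >> (BITS_PER_LONG - align_order);
}

int main(void)
{
	printf("%#lx %#lx\n", align_mask(3), align_mask(12));
	return 0;
}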
diff --git a/lib/kobject.c b/lib/kobject.c
index 2e3bd01964a9..3e3a5c3cb330 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -337,8 +337,9 @@ error:
337} 337}
338EXPORT_SYMBOL(kobject_init); 338EXPORT_SYMBOL(kobject_init);
339 339
340static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, 340static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
341 const char *fmt, va_list vargs) 341 struct kobject *parent,
342 const char *fmt, va_list vargs)
342{ 343{
343 int retval; 344 int retval;
344 345
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a60a6d335a91..cc0c69710dcf 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -610,6 +610,8 @@ next:
610 iter->skip = 0; 610 iter->skip = 0;
611 } 611 }
612 612
613 iter->p = NULL;
614
613 /* Ensure we see any new tables. */ 615 /* Ensure we see any new tables. */
614 smp_rmb(); 616 smp_rmb();
615 617
@@ -620,8 +622,6 @@ next:
620 return ERR_PTR(-EAGAIN); 622 return ERR_PTR(-EAGAIN);
621 } 623 }
622 624
623 iter->p = NULL;
624
625 return NULL; 625 return NULL;
626} 626}
627EXPORT_SYMBOL_GPL(rhashtable_walk_next); 627EXPORT_SYMBOL_GPL(rhashtable_walk_next);
diff --git a/mm/cma.h b/mm/cma.h
index 1132d733556d..17c75a4246c8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
16extern struct cma cma_areas[MAX_CMA_AREAS]; 16extern struct cma cma_areas[MAX_CMA_AREAS];
17extern unsigned cma_area_count; 17extern unsigned cma_area_count;
18 18
19static unsigned long cma_bitmap_maxno(struct cma *cma) 19static inline unsigned long cma_bitmap_maxno(struct cma *cma)
20{ 20{
21 return cma->count >> cma->order_per_bit; 21 return cma->count >> cma->order_per_bit;
22} 22}
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index 7621ee34daa0..f8e4b60db167 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -39,7 +39,7 @@ static int cma_used_get(void *data, u64 *val)
39 39
40 mutex_lock(&cma->lock); 40 mutex_lock(&cma->lock);
41 /* pages counter is smaller than sizeof(int) */ 41 /* pages counter is smaller than sizeof(int) */
42 used = bitmap_weight(cma->bitmap, (int)cma->count); 42 used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
43 mutex_unlock(&cma->lock); 43 mutex_unlock(&cma->lock);
44 *val = (u64)used << cma->order_per_bit; 44 *val = (u64)used << cma->order_per_bit;
45 45
@@ -52,13 +52,14 @@ static int cma_maxchunk_get(void *data, u64 *val)
52 struct cma *cma = data; 52 struct cma *cma = data;
53 unsigned long maxchunk = 0; 53 unsigned long maxchunk = 0;
54 unsigned long start, end = 0; 54 unsigned long start, end = 0;
55 unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
55 56
56 mutex_lock(&cma->lock); 57 mutex_lock(&cma->lock);
57 for (;;) { 58 for (;;) {
58 start = find_next_zero_bit(cma->bitmap, cma->count, end); 59 start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
59 if (start >= cma->count) 60 if (start >= cma->count)
60 break; 61 break;
61 end = find_next_bit(cma->bitmap, cma->count, start); 62 end = find_next_bit(cma->bitmap, bitmap_maxno, start);
62 maxchunk = max(end - start, maxchunk); 63 maxchunk = max(end - start, maxchunk);
63 } 64 }
64 mutex_unlock(&cma->lock); 65 mutex_unlock(&cma->lock);
@@ -170,10 +171,10 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
170 171
171 tmp = debugfs_create_dir(name, cma_debugfs_root); 172 tmp = debugfs_create_dir(name, cma_debugfs_root);
172 173
173 debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma, 174 debugfs_create_file("alloc", S_IWUSR, tmp, cma,
174 &cma_alloc_fops); 175 &cma_alloc_fops);
175 176
176 debugfs_create_file("free", S_IWUSR, cma_debugfs_root, cma, 177 debugfs_create_file("free", S_IWUSR, tmp, cma,
177 &cma_free_fops); 178 &cma_free_fops);
178 179
179 debugfs_create_file("base_pfn", S_IRUGO, tmp, 180 debugfs_create_file("base_pfn", S_IRUGO, tmp,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
1676 /* after clearing PageTail the gup refcount can be released */ 1676 /* after clearing PageTail the gup refcount can be released */
1677 smp_mb__after_atomic(); 1677 smp_mb__after_atomic();
1678 1678
1679 /* 1679 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1680 * retain hwpoison flag of the poisoned tail page:
1681 * fix for the unsuitable process killed on Guest Machine(KVM)
1682 * by the memory-failure.
1683 */
1684 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1685 page_tail->flags |= (page->flags & 1680 page_tail->flags |= (page->flags &
1686 ((1L << PG_referenced) | 1681 ((1L << PG_referenced) |
1687 (1L << PG_swapbacked) | 1682 (1L << PG_swapbacked) |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a63ea84..7b28e9cdf1c7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
2 * This file contains shadow memory manipulation code. 2 * This file contains shadow memory manipulation code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceedf810a..e07c94fbd0ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
2 * This file contains error reporting code. 2 * This file contains error reporting code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..1f4446a90cef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
909 * directly for tail pages. 909 * directly for tail pages.
910 */ 910 */
911 if (PageTransHuge(head)) { 911 if (PageTransHuge(head)) {
912 /*
913 * Non anonymous thp exists only in allocation/free time. We
914 * can't handle such a case correctly, so let's give it up.
915 * This should be better than triggering BUG_ON when kernel
916 * tries to touch the "partially handled" page.
917 */
918 if (!PageAnon(head)) {
919 pr_err("MCE: %#lx: non anonymous thp\n",
920 page_to_pfn(page));
921 return 0;
922 }
923
912 if (get_page_unless_zero(head)) { 924 if (get_page_unless_zero(head)) {
913 if (PageTail(page)) 925 if (PageTail(page))
914 get_page(page); 926 get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1134 } 1146 }
1135 1147
1136 if (!PageHuge(p) && PageTransHuge(hpage)) { 1148 if (!PageHuge(p) && PageTransHuge(hpage)) {
1137 if (!PageAnon(hpage)) { 1149 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
1138 pr_err("MCE: %#lx: non anonymous thp\n", pfn); 1150 if (!PageAnon(hpage))
1139 if (TestClearPageHWPoison(p)) 1151 pr_err("MCE: %#lx: non anonymous thp\n", pfn);
1140 atomic_long_sub(nr_pages, &num_poisoned_pages); 1152 else
1141 put_page(p); 1153 pr_err("MCE: %#lx: thp split failed\n", pfn);
1142 if (p != hpage)
1143 put_page(hpage);
1144 return -EBUSY;
1145 }
1146 if (unlikely(split_huge_page(hpage))) {
1147 pr_err("MCE: %#lx: thp split failed\n", pfn);
1148 if (TestClearPageHWPoison(p)) 1154 if (TestClearPageHWPoison(p))
1149 atomic_long_sub(nr_pages, &num_poisoned_pages); 1155 atomic_long_sub(nr_pages, &num_poisoned_pages);
1150 put_page(p); 1156 put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1209 if (!PageHWPoison(p)) { 1215 if (!PageHWPoison(p)) {
1210 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1216 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1211 atomic_long_sub(nr_pages, &num_poisoned_pages); 1217 atomic_long_sub(nr_pages, &num_poisoned_pages);
1218 unlock_page(hpage);
1212 put_page(hpage); 1219 put_page(hpage);
1213 res = 0; 1220 return 0;
1214 goto out;
1215 } 1221 }
1216 if (hwpoison_filter(p)) { 1222 if (hwpoison_filter(p)) {
1217 if (TestClearPageHWPoison(p)) 1223 if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
1535 */ 1541 */
1536 ret = __get_any_page(page, pfn, 0); 1542 ret = __get_any_page(page, pfn, 0);
1537 if (!PageLRU(page)) { 1543 if (!PageLRU(page)) {
1544 /* Drop page reference which is from __get_any_page() */
1545 put_page(page);
1538 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", 1546 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
1539 pfn, page->flags); 1547 pfn, page->flags);
1540 return -EIO; 1548 return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
1564 unlock_page(hpage); 1572 unlock_page(hpage);
1565 1573
1566 ret = isolate_huge_page(hpage, &pagelist); 1574 ret = isolate_huge_page(hpage, &pagelist);
1567 if (ret) { 1575 /*
1568 /* 1576 * get_any_page() and isolate_huge_page() takes a refcount each,
1569 * get_any_page() and isolate_huge_page() takes a refcount each, 1577 * so need to drop one here.
1570 * so need to drop one here. 1578 */
1571 */ 1579 put_page(hpage);
1572 put_page(hpage); 1580 if (!ret) {
1573 } else {
1574 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); 1581 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1575 return -EBUSY; 1582 return -EBUSY;
1576 } 1583 }
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
1656 inc_zone_page_state(page, NR_ISOLATED_ANON + 1663 inc_zone_page_state(page, NR_ISOLATED_ANON +
1657 page_is_file_cache(page)); 1664 page_is_file_cache(page));
1658 list_add(&page->lru, &pagelist); 1665 list_add(&page->lru, &pagelist);
1666 if (!TestSetPageHWPoison(page))
1667 atomic_long_inc(&num_poisoned_pages);
1659 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, 1668 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1660 MIGRATE_SYNC, MR_MEMORY_FAILURE); 1669 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1661 if (ret) { 1670 if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
1670 pfn, ret, page->flags); 1679 pfn, ret, page->flags);
1671 if (ret > 0) 1680 if (ret > 0)
1672 ret = -EIO; 1681 ret = -EIO;
1673 } else { 1682 if (TestClearPageHWPoison(page))
1674 SetPageHWPoison(page); 1683 atomic_long_dec(&num_poisoned_pages);
1675 atomic_long_inc(&num_poisoned_pages);
1676 } 1684 }
1677 } else { 1685 } else {
1678 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", 1686 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
index a84fbb772034..388dcf9aa283 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2670,6 +2670,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2670 2670
2671 pte_unmap(page_table); 2671 pte_unmap(page_table);
2672 2672
2673 /* File mapping without ->vm_ops ? */
2674 if (vma->vm_flags & VM_SHARED)
2675 return VM_FAULT_SIGBUS;
2676
2673 /* Check if we need to add a guard page to the stack */ 2677 /* Check if we need to add a guard page to the stack */
2674 if (check_stack_guard_page(vma, address) < 0) 2678 if (check_stack_guard_page(vma, address) < 0)
2675 return VM_FAULT_SIGSEGV; 2679 return VM_FAULT_SIGSEGV;
@@ -3099,6 +3103,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3099 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 3103 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
3100 3104
3101 pte_unmap(page_table); 3105 pte_unmap(page_table);
3106 /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
3107 if (!vma->vm_ops->fault)
3108 return VM_FAULT_SIGBUS;
3102 if (!(flags & FAULT_FLAG_WRITE)) 3109 if (!(flags & FAULT_FLAG_WRITE))
3103 return do_read_fault(mm, vma, address, pmd, pgoff, flags, 3110 return do_read_fault(mm, vma, address, pmd, pgoff, flags,
3104 orig_pte); 3111 orig_pte);
@@ -3244,13 +3251,12 @@ static int handle_pte_fault(struct mm_struct *mm,
3244 barrier(); 3251 barrier();
3245 if (!pte_present(entry)) { 3252 if (!pte_present(entry)) {
3246 if (pte_none(entry)) { 3253 if (pte_none(entry)) {
3247 if (vma->vm_ops) { 3254 if (vma->vm_ops)
3248 if (likely(vma->vm_ops->fault)) 3255 return do_fault(mm, vma, address, pte, pmd,
3249 return do_fault(mm, vma, address, pte, 3256 flags, entry);
3250 pmd, flags, entry); 3257
3251 } 3258 return do_anonymous_page(mm, vma, address, pte, pmd,
3252 return do_anonymous_page(mm, vma, address, 3259 flags);
3253 pte, pmd, flags);
3254 } 3260 }
3255 return do_swap_page(mm, vma, address, 3261 return do_swap_page(mm, vma, address,
3256 pte, pmd, flags, entry); 3262 pte, pmd, flags, entry);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..6da82bcb0a8b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
446 int nr_pages = PAGES_PER_SECTION; 446 int nr_pages = PAGES_PER_SECTION;
447 int nid = pgdat->node_id; 447 int nid = pgdat->node_id;
448 int zone_type; 448 int zone_type;
449 unsigned long flags; 449 unsigned long flags, pfn;
450 int ret; 450 int ret;
451 451
452 zone_type = zone - pgdat->node_zones; 452 zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
461 pgdat_resize_unlock(zone->zone_pgdat, &flags); 461 pgdat_resize_unlock(zone->zone_pgdat, &flags);
462 memmap_init_zone(nr_pages, nid, zone_type, 462 memmap_init_zone(nr_pages, nid, zone_type,
463 phys_start_pfn, MEMMAP_HOTPLUG); 463 phys_start_pfn, MEMMAP_HOTPLUG);
464
465 /* online_page_range is called later and expects pages reserved */
466 for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
467 if (!pfn_valid(pfn))
468 continue;
469
470 SetPageReserved(pfn_to_page(pfn));
471 }
464 return 0; 472 return 0;
465} 473}
466 474
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
1269 1277
1270 /* create new memmap entry */ 1278 /* create new memmap entry */
1271 firmware_map_add_hotplug(start, start + size, "System RAM"); 1279 firmware_map_add_hotplug(start, start + size, "System RAM");
1280 memblock_add_node(start, size, nid);
1272 1281
1273 goto out; 1282 goto out;
1274 1283
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
2005 2014
2006 /* remove memmap entry */ 2015 /* remove memmap entry */
2007 firmware_map_remove(start, start + size, "System RAM"); 2016 firmware_map_remove(start, start + size, "System RAM");
2017 memblock_free(start, size);
2018 memblock_remove(start, size);
2008 2019
2009 arch_remove_memory(start, size); 2020 arch_remove_memory(start, size);
2010 2021
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
880 /* Establish migration ptes or remove ptes */ 880 /* Establish migration ptes or remove ptes */
881 if (page_mapped(page)) { 881 if (page_mapped(page)) {
882 try_to_unmap(page, 882 try_to_unmap(page,
883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
884 TTU_IGNORE_HWPOISON);
884 page_was_mapped = 1; 885 page_was_mapped = 1;
885 } 886 }
886 887
@@ -950,7 +951,10 @@ out:
950 list_del(&page->lru); 951 list_del(&page->lru);
951 dec_zone_page_state(page, NR_ISOLATED_ANON + 952 dec_zone_page_state(page, NR_ISOLATED_ANON +
952 page_is_file_cache(page)); 953 page_is_file_cache(page));
953 if (reason != MR_MEMORY_FAILURE) 954 /* Soft-offlined page shouldn't go through lru cache list */
955 if (reason == MR_MEMORY_FAILURE)
956 put_page(page);
957 else
954 putback_lru_page(page); 958 putback_lru_page(page);
955 } 959 }
956 960
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
2063 */ 2063 */
2064void __init page_writeback_init(void) 2064void __init page_writeback_init(void)
2065{ 2065{
2066 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2067
2066 writeback_set_ratelimit(); 2068 writeback_set_ratelimit();
2067 register_cpu_notifier(&ratelimit_nb); 2069 register_cpu_notifier(&ratelimit_nb);
2068
2069 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2070} 2070}
2071 2071
2072/** 2072/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 506eac8b38af..5b5240b7f642 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/rwsem.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/jiffies.h> 22#include <linux/jiffies.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -246,9 +245,7 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
246/* Returns true if the struct page for the pfn is uninitialised */ 245/* Returns true if the struct page for the pfn is uninitialised */
247static inline bool __meminit early_page_uninitialised(unsigned long pfn) 246static inline bool __meminit early_page_uninitialised(unsigned long pfn)
248{ 247{
249 int nid = early_pfn_to_nid(pfn); 248 if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
250
251 if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
252 return true; 249 return true;
253 250
254 return false; 251 return false;
@@ -983,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
983 980
984#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ 981#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
985 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) 982 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
986/* Only safe to use early in boot when initialisation is single-threaded */ 983
987static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 984static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
988 985
989int __meminit early_pfn_to_nid(unsigned long pfn) 986int __meminit early_pfn_to_nid(unsigned long pfn)
990{ 987{
988 static DEFINE_SPINLOCK(early_pfn_lock);
991 int nid; 989 int nid;
992 990
993 /* The system will behave unpredictably otherwise */ 991 spin_lock(&early_pfn_lock);
994 BUG_ON(system_state != SYSTEM_BOOTING);
995
996 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 992 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
997 if (nid >= 0) 993 if (nid < 0)
998 return nid; 994 nid = 0;
999 /* just returns 0 */ 995 spin_unlock(&early_pfn_lock);
1000 return 0; 996
997 return nid;
1001} 998}
1002#endif 999#endif
1003 1000
@@ -1062,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
1062 __free_pages_boot_core(page, pfn, 0); 1059 __free_pages_boot_core(page, pfn, 0);
1063} 1060}
1064 1061
1065static __initdata DECLARE_RWSEM(pgdat_init_rwsem); 1062/* Completion tracking for deferred_init_memmap() threads */
1063static atomic_t pgdat_init_n_undone __initdata;
1064static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1065
1066static inline void __init pgdat_init_report_one_done(void)
1067{
1068 if (atomic_dec_and_test(&pgdat_init_n_undone))
1069 complete(&pgdat_init_all_done_comp);
1070}
1066 1071
1067/* Initialise remaining memory on a node */ 1072/* Initialise remaining memory on a node */
1068static int __init deferred_init_memmap(void *data) 1073static int __init deferred_init_memmap(void *data)
@@ -1079,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
1079 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 1084 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1080 1085
1081 if (first_init_pfn == ULONG_MAX) { 1086 if (first_init_pfn == ULONG_MAX) {
1082 up_read(&pgdat_init_rwsem); 1087 pgdat_init_report_one_done();
1083 return 0; 1088 return 0;
1084 } 1089 }
1085 1090
@@ -1179,7 +1184,8 @@ free_range:
1179 1184
1180 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, 1185 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1181 jiffies_to_msecs(jiffies - start)); 1186 jiffies_to_msecs(jiffies - start));
1182 up_read(&pgdat_init_rwsem); 1187
1188 pgdat_init_report_one_done();
1183 return 0; 1189 return 0;
1184} 1190}
1185 1191
@@ -1187,14 +1193,17 @@ void __init page_alloc_init_late(void)
1187{ 1193{
1188 int nid; 1194 int nid;
1189 1195
1196 /* There will be num_node_state(N_MEMORY) threads */
1197 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1190 for_each_node_state(nid, N_MEMORY) { 1198 for_each_node_state(nid, N_MEMORY) {
1191 down_read(&pgdat_init_rwsem);
1192 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 1199 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1193 } 1200 }
1194 1201
1195 /* Block until all are initialised */ 1202 /* Block until all are initialised */
1196 down_write(&pgdat_init_rwsem); 1203 wait_for_completion(&pgdat_init_all_done_comp);
1197 up_write(&pgdat_init_rwsem); 1204
1205 /* Reinit limits that are based on free pages after the kernel is up */
1206 files_maxfiles_init();
1198} 1207}
1199#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1208#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1200 1209
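The page_alloc.c change above drops the pgdat_init_rwsem trick in favour of an atomic count of outstanding deferred-init threads plus a completion that fires when the last one reports done. A userspace approximation of that "wait for N workers" pattern with pthreads (NWORKERS and all names are invented for the sketch) might look like:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_int n_undone = NWORKERS;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;

static void report_one_done(void)
{
	if (atomic_fetch_sub(&n_undone, 1) == 1) {	/* last worker finished */
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&all_done);
		pthread_mutex_unlock(&lock);
	}
}

static void *worker(void *arg)
{
	printf("node %ld initialised\n", (long)arg);
	report_one_done();
	return NULL;
}

int main(void)
{
	pthread_t t[NWORKERS];

	for (long i = 0; i < NWORKERS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);

	pthread_mutex_lock(&lock);
	while (atomic_load(&n_undone) > 0)
		pthread_cond_wait(&all_done, &lock);
	pthread_mutex_unlock(&lock);
	puts("all nodes initialised");

	for (int i = 0; i < NWORKERS; i++)
		pthread_join(t[i], NULL);
	return 0;
}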
@@ -1287,6 +1296,10 @@ static inline int check_new_page(struct page *page)
1287 bad_reason = "non-NULL mapping"; 1296 bad_reason = "non-NULL mapping";
1288 if (unlikely(atomic_read(&page->_count) != 0)) 1297 if (unlikely(atomic_read(&page->_count) != 0))
1289 bad_reason = "nonzero _count"; 1298 bad_reason = "nonzero _count";
1299 if (unlikely(page->flags & __PG_HWPOISON)) {
1300 bad_reason = "HWPoisoned (hardware-corrupted)";
1301 bad_flags = __PG_HWPOISON;
1302 }
1290 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { 1303 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1291 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; 1304 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1292 bad_flags = PAGE_FLAGS_CHECK_AT_PREP; 1305 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -1330,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1330 set_page_owner(page, order, gfp_flags); 1343 set_page_owner(page, order, gfp_flags);
1331 1344
1332 /* 1345 /*
1333 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to 1346 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1334 * allocate the page. The expectation is that the caller is taking 1347 * allocate the page. The expectation is that the caller is taking
1335 * steps that will free more memory. The caller should avoid the page 1348 * steps that will free more memory. The caller should avoid the page
1336 * being used for !PFMEMALLOC purposes. 1349 * being used for !PFMEMALLOC purposes.
1337 */ 1350 */
1338 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); 1351 if (alloc_flags & ALLOC_NO_WATERMARKS)
1352 set_page_pfmemalloc(page);
1353 else
1354 clear_page_pfmemalloc(page);
1339 1355
1340 return 0; 1356 return 0;
1341} 1357}
@@ -1950,6 +1966,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
1950void split_page(struct page *page, unsigned int order) 1966void split_page(struct page *page, unsigned int order)
1951{ 1967{
1952 int i; 1968 int i;
1969 gfp_t gfp_mask;
1953 1970
1954 VM_BUG_ON_PAGE(PageCompound(page), page); 1971 VM_BUG_ON_PAGE(PageCompound(page), page);
1955 VM_BUG_ON_PAGE(!page_count(page), page); 1972 VM_BUG_ON_PAGE(!page_count(page), page);
@@ -1963,10 +1980,11 @@ void split_page(struct page *page, unsigned int order)
1963 split_page(virt_to_page(page[0].shadow), order); 1980 split_page(virt_to_page(page[0].shadow), order);
1964#endif 1981#endif
1965 1982
1966 set_page_owner(page, 0, 0); 1983 gfp_mask = get_page_owner_gfp(page);
1984 set_page_owner(page, 0, gfp_mask);
1967 for (i = 1; i < (1 << order); i++) { 1985 for (i = 1; i < (1 << order); i++) {
1968 set_page_refcounted(page + i); 1986 set_page_refcounted(page + i);
1969 set_page_owner(page + i, 0, 0); 1987 set_page_owner(page + i, 0, gfp_mask);
1970 } 1988 }
1971} 1989}
1972EXPORT_SYMBOL_GPL(split_page); 1990EXPORT_SYMBOL_GPL(split_page);
@@ -1996,6 +2014,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
1996 zone->free_area[order].nr_free--; 2014 zone->free_area[order].nr_free--;
1997 rmv_page_order(page); 2015 rmv_page_order(page);
1998 2016
2017 set_page_owner(page, order, __GFP_MOVABLE);
2018
1999 /* Set the pageblock if the isolated page is at least a pageblock */ 2019 /* Set the pageblock if the isolated page is at least a pageblock */
2000 if (order >= pageblock_order - 1) { 2020 if (order >= pageblock_order - 1) {
2001 struct page *endpage = page + (1 << order) - 1; 2021 struct page *endpage = page + (1 << order) - 1;
@@ -2007,7 +2027,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
2007 } 2027 }
2008 } 2028 }
2009 2029
2010 set_page_owner(page, order, 0); 2030
2011 return 1UL << order; 2031 return 1UL << order;
2012} 2032}
2013 2033
@@ -3328,7 +3348,7 @@ refill:
3328 atomic_add(size - 1, &page->_count); 3348 atomic_add(size - 1, &page->_count);
3329 3349
3330 /* reset page count bias and offset to start of new frag */ 3350 /* reset page count bias and offset to start of new frag */
3331 nc->pfmemalloc = page->pfmemalloc; 3351 nc->pfmemalloc = page_is_pfmemalloc(page);
3332 nc->pagecnt_bias = size; 3352 nc->pagecnt_bias = size;
3333 nc->offset = size; 3353 nc->offset = size;
3334 } 3354 }
@@ -5043,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5043{ 5063{
5044 unsigned long zone_start_pfn, zone_end_pfn; 5064 unsigned long zone_start_pfn, zone_end_pfn;
5045 5065
5066 /* When hotadd a new node, the node should be empty */
5067 if (!node_start_pfn && !node_end_pfn)
5068 return 0;
5069
5046 /* Get the start and end of the zone */ 5070 /* Get the start and end of the zone */
5047 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 5071 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5048 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 5072 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5106,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
5106 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5130 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5107 unsigned long zone_start_pfn, zone_end_pfn; 5131 unsigned long zone_start_pfn, zone_end_pfn;
5108 5132
5133 /* When hotadd a new node, the node should be empty */
5134 if (!node_start_pfn && !node_end_pfn)
5135 return 0;
5136
5109 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5137 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5110 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5138 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5111 5139
diff --git a/mm/page_owner.c b/mm/page_owner.c
index bd5f842b56d2..983c3a10fa07 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -76,6 +76,13 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
76 __set_bit(PAGE_EXT_OWNER, &page_ext->flags); 76 __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
77} 77}
78 78
79gfp_t __get_page_owner_gfp(struct page *page)
80{
81 struct page_ext *page_ext = lookup_page_ext(page);
82
83 return page_ext->gfp_mask;
84}
85
79static ssize_t 86static ssize_t
80print_page_owner(char __user *buf, size_t count, unsigned long pfn, 87print_page_owner(char __user *buf, size_t count, unsigned long pfn,
81 struct page *page, struct page_ext *page_ext) 88 struct page *page, struct page_ext *page_ext)
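The new __get_page_owner_gfp() above pairs with the earlier split_page() and __isolate_free_page() hunks: when a higher-order block is split, each sub-page is now tagged with the gfp mask recorded at allocation time instead of 0. A small userspace sketch of carrying the recorded mask across a split; split_block() and owner_gfp[] are made-up names and the mask value is arbitrary.

#include <stdio.h>

#define NPAGES 8
static unsigned int owner_gfp[NPAGES];          /* stand-in for page_ext->gfp_mask */

static void split_block(int first, int order)
{
        unsigned int gfp = owner_gfp[first];    /* like get_page_owner_gfp() */

        for (int i = 0; i < (1 << order); i++)
                owner_gfp[first + i] = gfp;     /* like set_page_owner(page + i, 0, gfp) */
}

int main(void)
{
        owner_gfp[0] = 0x24000c0;               /* arbitrary "GFP_KERNEL-ish" value */
        split_block(0, 3);
        printf("page 5 owner gfp: %#x\n", owner_gfp[5]);
        return 0;
}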
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3364 * kernel internal. There will be NO LSM permission checks against the 3364 * kernel internal. There will be NO LSM permission checks against the
3365 * underlying inode. So users of this interface must do LSM checks at a 3365 * underlying inode. So users of this interface must do LSM checks at a
3366 * higher layer. The one user is the big_key implementation. LSM checks 3366 * higher layer. The users are the big_key and shm implementations. LSM
3367 * are provided at the key level rather than the inode level. 3367 * checks are provided at the key or shm level rather than the inode.
3368 * @name: name for dentry (to be seen in /proc/<pid>/maps 3368 * @name: name for dentry (to be seen in /proc/<pid>/maps
3369 * @size: size to be set for the file 3369 * @size: size to be set for the file
3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab.c b/mm/slab.c
index 200e22412a16..bbd0b47dc6a9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1603 } 1603 }
1604 1604
1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ 1605 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
1606 if (unlikely(page->pfmemalloc)) 1606 if (page_is_pfmemalloc(page))
1607 pfmemalloc_active = true; 1607 pfmemalloc_active = true;
1608 1608
1609 nr_pages = (1 << cachep->gfporder); 1609 nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
1614 add_zone_page_state(page_zone(page), 1614 add_zone_page_state(page_zone(page),
1615 NR_SLAB_UNRECLAIMABLE, nr_pages); 1615 NR_SLAB_UNRECLAIMABLE, nr_pages);
1616 __SetPageSlab(page); 1616 __SetPageSlab(page);
1617 if (page->pfmemalloc) 1617 if (page_is_pfmemalloc(page))
1618 SetPageSlabPfmemalloc(page); 1618 SetPageSlabPfmemalloc(page);
1619 1619
1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { 1620 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 SLAB_FAILSLAB) 38 SLAB_FAILSLAB)
39 39
40#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 40#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
41 SLAB_CACHE_DMA | SLAB_NOTRACK)
42 41
43/* 42/*
44 * Merge control. If this is set then no merging of slab caches will occur. 43 * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 816df0016555..f68c0e50f3c0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1427 inc_slabs_node(s, page_to_nid(page), page->objects); 1427 inc_slabs_node(s, page_to_nid(page), page->objects);
1428 page->slab_cache = s; 1428 page->slab_cache = s;
1429 __SetPageSlab(page); 1429 __SetPageSlab(page);
1430 if (page->pfmemalloc) 1430 if (page_is_pfmemalloc(page))
1431 SetPageSlabPfmemalloc(page); 1431 SetPageSlabPfmemalloc(page);
1432 1432
1433 start = page_address(page); 1433 start = page_address(page);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
973 * caller can stall after page list has been processed. 973 * caller can stall after page list has been processed.
974 * 974 *
975 * 2) Global or new memcg reclaim encounters a page that is 975 * 2) Global or new memcg reclaim encounters a page that is
976 * not marked for immediate reclaim or the caller does not 976 * not marked for immediate reclaim, or the caller does not
977 * have __GFP_IO. In this case mark the page for immediate 977 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
978 * not to fs). In this case mark the page for immediate
978 * reclaim and continue scanning. 979 * reclaim and continue scanning.
979 * 980 *
980 * __GFP_IO is checked because a loop driver thread might 981 * Require may_enter_fs because we would wait on fs, which
982 * may not have submitted IO yet. And the loop driver might
981 * enter reclaim, and deadlock if it waits on a page for 983 * enter reclaim, and deadlock if it waits on a page for
982 * which it is needed to do the write (loop masks off 984 * which it is needed to do the write (loop masks off
983 * __GFP_IO|__GFP_FS for this reason); but more thought 985 * __GFP_IO|__GFP_FS for this reason); but more thought
984 * would probably show more reasons. 986 * would probably show more reasons.
985 * 987 *
986 * Don't require __GFP_FS, since we're not going into the
987 * FS, just waiting on its writeback completion. Worryingly,
988 * ext4 gfs2 and xfs allocate pages with
989 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
990 * may_enter_fs here is liable to OOM on them.
991 *
992 * 3) Legacy memcg encounters a page that is not already marked 988 * 3) Legacy memcg encounters a page that is not already marked
993 * PageReclaim. memcg does not have any dirty pages 989 * PageReclaim. memcg does not have any dirty pages
994 * throttling so we could easily OOM just because too many 990 * throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1005 1001
1006 /* Case 2 above */ 1002 /* Case 2 above */
1007 } else if (sane_reclaim(sc) || 1003 } else if (sane_reclaim(sc) ||
1008 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { 1004 !PageReclaim(page) || !may_enter_fs) {
1009 /* 1005 /*
1010 * This is slightly racy - end_page_writeback() 1006 * This is slightly racy - end_page_writeback()
1011 * might have just cleared PageReclaim, then 1007 * might have just cleared PageReclaim, then
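The rewritten comment and test above key "case 2" on may_enter_fs instead of __GFP_IO: a page under writeback is marked for immediate reclaim and skipped when the caller could not wait on the filesystem anyway (the loop-driver deadlock mentioned in the comment). A userspace sketch of just that test; sane_reclaim stands for the result of sane_reclaim(sc) and page_reclaim for PageReclaim(page).

#include <stdbool.h>
#include <stdio.h>

/* true: set PG_reclaim and keep scanning (case 2); false: wait for writeback */
static bool mark_for_immediate_reclaim(bool sane_reclaim, bool page_reclaim,
                                       bool may_enter_fs)
{
        return sane_reclaim || !page_reclaim || !may_enter_fs;
}

int main(void)
{
        /* legacy memcg reclaim, page already marked, caller may enter fs: wait */
        printf("%d\n", mark_for_immediate_reclaim(false, true, true));
        /* same, but a GFP_NOFS caller: mark for immediate reclaim and move on */
        printf("%d\n", mark_for_immediate_reclaim(false, true, false));
        return 0;
}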
diff --git a/net/9p/client.c b/net/9p/client.c
index 498454b3c06c..ea79ee9a7348 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
1541 struct p9_client *clnt = fid->clnt; 1541 struct p9_client *clnt = fid->clnt;
1542 struct p9_req_t *req; 1542 struct p9_req_t *req;
1543 int total = 0; 1543 int total = 0;
1544 *err = 0;
1544 1545
1545 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", 1546 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
1546 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to)); 1547 fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
1620 struct p9_client *clnt = fid->clnt; 1621 struct p9_client *clnt = fid->clnt;
1621 struct p9_req_t *req; 1622 struct p9_req_t *req;
1622 int total = 0; 1623 int total = 0;
1624 *err = 0;
1623 1625
1624 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n", 1626 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
1625 fid->fid, (unsigned long long) offset, 1627 fid->fid, (unsigned long long) offset,
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
704 704
705 mutex_unlock(&virtio_9p_lock); 705 mutex_unlock(&virtio_9p_lock);
706 706
707 vdev->config->reset(vdev);
707 vdev->config->del_vqs(vdev); 708 vdev->config->del_vqs(vdev);
708 709
709 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 710 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 1997538a5d23..3b78e8473a01 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 ax25_stop_heartbeat(ax25);
267 ax25_stop_t1timer(ax25); 268 ax25_stop_t1timer(ax25);
268 ax25_stop_t2timer(ax25); 269 ax25_stop_t2timer(ax25);
269 ax25_stop_t3timer(ax25); 270 ax25_stop_t3timer(ax25);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6aed096..6d0b471eede8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
1138 * @bat_priv: the bat priv with all the soft interface information 1138 * @bat_priv: the bat priv with all the soft interface information
1139 * @skb: packet to check 1139 * @skb: packet to check
1140 * @hdr_size: size of the encapsulation header 1140 * @hdr_size: size of the encapsulation header
1141 *
1142 * Returns true if the packet was snooped and consumed by DAT. False if the
1143 * packet has to be delivered to the interface
1141 */ 1144 */
1142bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, 1145bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1143 struct sk_buff *skb, int hdr_size) 1146 struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1145 uint16_t type; 1148 uint16_t type;
1146 __be32 ip_src, ip_dst; 1149 __be32 ip_src, ip_dst;
1147 uint8_t *hw_src, *hw_dst; 1150 uint8_t *hw_src, *hw_dst;
1148 bool ret = false; 1151 bool dropped = false;
1149 unsigned short vid; 1152 unsigned short vid;
1150 1153
1151 if (!atomic_read(&bat_priv->distributed_arp_table)) 1154 if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1174 /* if this REPLY is directed to a client of mine, let's deliver the 1177 /* if this REPLY is directed to a client of mine, let's deliver the
1175 * packet to the interface 1178 * packet to the interface
1176 */ 1179 */
1177 ret = !batadv_is_my_client(bat_priv, hw_dst, vid); 1180 dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
1181
1182 /* if this REPLY is sent on behalf of a client of mine, let's drop the
1183 * packet because the client will reply by itself
1184 */
1185 dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
1178out: 1186out:
1179 if (ret) 1187 if (dropped)
1180 kfree_skb(skb); 1188 kfree_skb(skb);
1181 /* if ret == false -> packet has to be delivered to the interface */ 1189 /* if dropped == false -> deliver to the interface */
1182 return ret; 1190 return dropped;
1183} 1191}
1184 1192
1185/** 1193/**
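The hunk above renames ret to dropped and additionally drops ARP replies sent on behalf of one of our own clients, since that client will answer by itself; the added kernel-doc states that a true return means the skb was consumed. A small userspace sketch of the decision, with is_my_client() standing in for batadv_is_my_client() and a fake matching rule.

#include <stdbool.h>
#include <stdio.h>

static bool is_my_client(const char *mac)
{
        return mac[0] == 'L';           /* fake rule: "Local..." is mine */
}

/* true: free the packet, false: deliver it to the interface */
static bool snoop_arp_reply(const char *hw_src, const char *hw_dst)
{
        bool dropped = !is_my_client(hw_dst);

        dropped |= is_my_client(hw_src);
        return dropped;
}

int main(void)
{
        printf("%d\n", snoop_arp_reply("Remote", "Local"));     /* deliver */
        printf("%d\n", snoop_arp_reply("Local", "Local"));      /* drop */
        return 0;
}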
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb0158620628..cffa92dd9877 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
439 439
440 INIT_HLIST_NODE(&gw_node->list); 440 INIT_HLIST_NODE(&gw_node->list);
441 gw_node->orig_node = orig_node; 441 gw_node->orig_node = orig_node;
442 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
443 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
442 atomic_set(&gw_node->refcount, 1); 444 atomic_set(&gw_node->refcount, 1);
443 445
444 spin_lock_bh(&bat_priv->gw.list_lock); 446 spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961da75d..a2fc843c2243 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@ out:
479 */ 479 */
480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) 480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
481{ 481{
482 if (!vlan)
483 return;
484
482 if (atomic_dec_and_test(&vlan->refcount)) { 485 if (atomic_dec_and_test(&vlan->refcount)) {
483 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); 486 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
484 hlist_del_rcu(&vlan->list); 487 hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b4824951010b..5809b39c1922 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
594 594
595 /* increase the refcounter of the related vlan */ 595 /* increase the refcounter of the related vlan */
596 vlan = batadv_softif_vlan_get(bat_priv, vid); 596 vlan = batadv_softif_vlan_get(bat_priv, vid);
597 if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
598 addr, BATADV_PRINT_VID(vid))) {
599 kfree(tt_local);
600 tt_local = NULL;
601 goto out;
602 }
597 603
598 batadv_dbg(BATADV_DBG_TT, bat_priv, 604 batadv_dbg(BATADV_DBG_TT, bat_priv,
599 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 605 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1034 struct batadv_tt_local_entry *tt_local_entry; 1040 struct batadv_tt_local_entry *tt_local_entry;
1035 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1041 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1036 struct batadv_softif_vlan *vlan; 1042 struct batadv_softif_vlan *vlan;
1043 void *tt_entry_exists;
1037 1044
1038 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1045 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1039 if (!tt_local_entry) 1046 if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1061 * immediately purge it 1068 * immediately purge it
1062 */ 1069 */
1063 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); 1070 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
1064 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1071
1072 tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
1073 batadv_compare_tt,
1074 batadv_choose_tt,
1075 &tt_local_entry->common);
1076 if (!tt_entry_exists)
1077 goto out;
1078
1079 /* extra call to free the local tt entry */
1065 batadv_tt_local_entry_free_ref(tt_local_entry); 1080 batadv_tt_local_entry_free_ref(tt_local_entry);
1066 1081
1067 /* decrease the reference held for this vlan */ 1082 /* decrease the reference held for this vlan */
1068 vlan = batadv_softif_vlan_get(bat_priv, vid); 1083 vlan = batadv_softif_vlan_get(bat_priv, vid);
1084 if (!vlan)
1085 goto out;
1086
1069 batadv_softif_vlan_free_ref(vlan); 1087 batadv_softif_vlan_free_ref(vlan);
1070 batadv_softif_vlan_free_ref(vlan); 1088 batadv_softif_vlan_free_ref(vlan);
1071 1089
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1166 /* decrease the reference held for this vlan */ 1184 /* decrease the reference held for this vlan */
1167 vlan = batadv_softif_vlan_get(bat_priv, 1185 vlan = batadv_softif_vlan_get(bat_priv,
1168 tt_common_entry->vid); 1186 tt_common_entry->vid);
1169 batadv_softif_vlan_free_ref(vlan); 1187 if (vlan) {
1170 batadv_softif_vlan_free_ref(vlan); 1188 batadv_softif_vlan_free_ref(vlan);
1189 batadv_softif_vlan_free_ref(vlan);
1190 }
1171 1191
1172 batadv_tt_local_entry_free_ref(tt_local); 1192 batadv_tt_local_entry_free_ref(tt_local);
1173 } 1193 }
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3207 3227
3208 /* decrease the reference held for this vlan */ 3228 /* decrease the reference held for this vlan */
3209 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); 3229 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3210 batadv_softif_vlan_free_ref(vlan); 3230 if (vlan) {
3211 batadv_softif_vlan_free_ref(vlan); 3231 batadv_softif_vlan_free_ref(vlan);
3232 batadv_softif_vlan_free_ref(vlan);
3233 }
3212 3234
3213 batadv_tt_local_entry_free_ref(tt_local); 3235 batadv_tt_local_entry_free_ref(tt_local);
3214 } 3236 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb279165..92720f3fe573 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7820 /* Make sure we copy only the significant bytes based on the 7820 /* Make sure we copy only the significant bytes based on the
7821 * encryption key size, and set the rest of the value to zeroes. 7821 * encryption key size, and set the rest of the value to zeroes.
7822 */ 7822 */
7823 memcpy(ev.key.val, key->val, sizeof(key->enc_size)); 7823 memcpy(ev.key.val, key->val, key->enc_size);
7824 memset(ev.key.val + key->enc_size, 0, 7824 memset(ev.key.val + key->enc_size, 0,
7825 sizeof(ev.key.val) - key->enc_size); 7825 sizeof(ev.key.val) - key->enc_size);
7826 7826
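The one-line fix above replaces sizeof(key->enc_size), which is the size of the one-byte length field itself, with the stored length, so the significant part of the key is actually copied before the remainder is zeroed. A userspace sketch of the difference, with an illustrative struct layout.

#include <stdio.h>
#include <string.h>

struct ltk {
        unsigned char enc_size;
        unsigned char val[16];
};

int main(void)
{
        struct ltk key = { .enc_size = 16 };
        unsigned char out[16] = { 0 };

        memset(key.val, 0xab, sizeof(key.val));

        memcpy(out, key.val, sizeof(key.enc_size));     /* buggy: copies 1 byte */
        printf("buggy copy: %02x %02x\n", out[0], out[1]);

        memcpy(out, key.val, key.enc_size);             /* fixed: copies 16 bytes */
        printf("fixed copy: %02x %02x\n", out[0], out[1]);
        return 0;
}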
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp; 2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2315 2319
2316 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2317 return 1; 2321 return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e97572b5d2cc..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,14 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
37 37
38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
39{ 39{
40 if (!is_skb_forwardable(skb->dev, skb)) { 40 if (!is_skb_forwardable(skb->dev, skb))
41 kfree_skb(skb); 41 goto drop;
42 } else { 42
43 skb_push(skb, ETH_HLEN); 43 skb_push(skb, ETH_HLEN);
44 br_drop_fake_rtable(skb); 44 br_drop_fake_rtable(skb);
45 dev_queue_xmit(skb); 45 skb_sender_cpu_clear(skb);
46
47 if (skb->ip_summed == CHECKSUM_PARTIAL &&
48 (skb->protocol == htons(ETH_P_8021Q) ||
49 skb->protocol == htons(ETH_P_8021AD))) {
50 int depth;
51
52 if (!__vlan_get_protocol(skb, skb->protocol, &depth))
53 goto drop;
54
55 skb_set_network_header(skb, depth);
46 } 56 }
47 57
58 dev_queue_xmit(skb);
59
60 return 0;
61
62drop:
63 kfree_skb(skb);
48 return 0; 64 return 0;
49} 65}
50EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 66EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index e29ad70b3000..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -323,6 +323,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
323 struct net_bridge_port_group *p; 323 struct net_bridge_port_group *p;
324 struct net_bridge_port_group __rcu **pp; 324 struct net_bridge_port_group __rcu **pp;
325 struct net_bridge_mdb_htable *mdb; 325 struct net_bridge_mdb_htable *mdb;
326 unsigned long now = jiffies;
326 int err; 327 int err;
327 328
328 mdb = mlock_dereference(br->mdb, br); 329 mdb = mlock_dereference(br->mdb, br);
@@ -347,8 +348,9 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
347 if (unlikely(!p)) 348 if (unlikely(!p))
348 return -ENOMEM; 349 return -ENOMEM;
349 rcu_assign_pointer(*pp, p); 350 rcu_assign_pointer(*pp, p);
351 if (state == MDB_TEMPORARY)
352 mod_timer(&p->timer, now + br->multicast_membership_interval);
350 353
351 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
352 return 0; 354 return 0;
353} 355}
354 356
@@ -371,6 +373,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
371 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 373 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
372 return -EINVAL; 374 return -EINVAL;
373 375
376 memset(&ip, 0, sizeof(ip));
374 ip.proto = entry->addr.proto; 377 ip.proto = entry->addr.proto;
375 if (ip.proto == htons(ETH_P_IP)) 378 if (ip.proto == htons(ETH_P_IP))
376 ip.u.ip4 = entry->addr.u.ip4; 379 ip.u.ip4 = entry->addr.u.ip4;
@@ -417,20 +420,14 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
417 if (!netif_running(br->dev) || br->multicast_disabled) 420 if (!netif_running(br->dev) || br->multicast_disabled)
418 return -EINVAL; 421 return -EINVAL;
419 422
423 memset(&ip, 0, sizeof(ip));
420 ip.proto = entry->addr.proto; 424 ip.proto = entry->addr.proto;
421 if (ip.proto == htons(ETH_P_IP)) { 425 if (ip.proto == htons(ETH_P_IP))
422 if (timer_pending(&br->ip4_other_query.timer))
423 return -EBUSY;
424
425 ip.u.ip4 = entry->addr.u.ip4; 426 ip.u.ip4 = entry->addr.u.ip4;
426#if IS_ENABLED(CONFIG_IPV6) 427#if IS_ENABLED(CONFIG_IPV6)
427 } else { 428 else
428 if (timer_pending(&br->ip6_other_query.timer))
429 return -EBUSY;
430
431 ip.u.ip6 = entry->addr.u.ip6; 429 ip.u.ip6 = entry->addr.u.ip6;
432#endif 430#endif
433 }
434 431
435 spin_lock_bh(&br->multicast_lock); 432 spin_lock_bh(&br->multicast_lock);
436 mdb = mlock_dereference(br->mdb, br); 433 mdb = mlock_dereference(br->mdb, br);
@@ -448,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
448 if (p->port->state == BR_STATE_DISABLED) 445 if (p->port->state == BR_STATE_DISABLED)
449 goto unlock; 446 goto unlock;
450 447
448 entry->state = p->state;
451 rcu_assign_pointer(*pp, p->next); 449 rcu_assign_pointer(*pp, p->next);
452 hlist_del_init(&p->mglist); 450 hlist_del_init(&p->mglist);
453 del_timer(&p->timer); 451 del_timer(&p->timer);
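Both __br_mdb_add() and __br_mdb_del() above now memset() the stack br_ip before filling it, so members the caller never sets explicitly (such as the vlan id) cannot carry stack garbage into the group lookup. A userspace sketch of why a hand-filled lookup key needs this; the types and field names are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ip_key {
        unsigned int   addr;
        unsigned short vid;     /* easy to forget when filling the key by hand */
};

static bool key_equal(const struct ip_key *a, const struct ip_key *b)
{
        return a->addr == b->addr && a->vid == b->vid;
}

int main(void)
{
        struct ip_key stored = { .addr = 0x0a000001, .vid = 0 };
        struct ip_key probe;

        memset(&probe, 0, sizeof(probe));       /* the fix: no garbage vid */
        probe.addr = 0x0a000001;

        printf("match: %d\n", key_equal(&stored, &probe));
        return 0;
}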
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 742a6c27d7a2..1285eaf5dc22 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
39 struct bridge_mcast_own_query *query); 39 struct bridge_mcast_own_query *query);
40static void br_multicast_add_router(struct net_bridge *br, 40static void br_multicast_add_router(struct net_bridge *br,
41 struct net_bridge_port *port); 41 struct net_bridge_port *port);
42static void br_ip4_multicast_leave_group(struct net_bridge *br,
43 struct net_bridge_port *port,
44 __be32 group,
45 __u16 vid);
46#if IS_ENABLED(CONFIG_IPV6)
47static void br_ip6_multicast_leave_group(struct net_bridge *br,
48 struct net_bridge_port *port,
49 const struct in6_addr *group,
50 __u16 vid);
51#endif
42unsigned int br_mdb_rehash_seq; 52unsigned int br_mdb_rehash_seq;
43 53
44static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 54static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1010 continue; 1020 continue;
1011 } 1021 }
1012 1022
1013 err = br_ip4_multicast_add_group(br, port, group, vid); 1023 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1014 if (err) 1024 type == IGMPV3_MODE_IS_INCLUDE) &&
1015 break; 1025 ntohs(grec->grec_nsrcs) == 0) {
1026 br_ip4_multicast_leave_group(br, port, group, vid);
1027 } else {
1028 err = br_ip4_multicast_add_group(br, port, group, vid);
1029 if (err)
1030 break;
1031 }
1016 } 1032 }
1017 1033
1018 return err; 1034 return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1071 continue; 1087 continue;
1072 } 1088 }
1073 1089
1074 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1090 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1075 vid); 1091 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1076 if (err) 1092 ntohs(*nsrcs) == 0) {
1077 break; 1093 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1094 vid);
1095 } else {
1096 err = br_ip6_multicast_add_group(br, port,
1097 &grec->grec_mca, vid);
1098 if (!err)
1099 break;
1100 }
1078 } 1101 }
1079 1102
1080 return err; 1103 return err;
@@ -1393,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
1393 1416
1394 spin_lock(&br->multicast_lock); 1417 spin_lock(&br->multicast_lock);
1395 if (!netif_running(br->dev) || 1418 if (!netif_running(br->dev) ||
1396 (port && port->state == BR_STATE_DISABLED) || 1419 (port && port->state == BR_STATE_DISABLED))
1397 timer_pending(&other_query->timer))
1398 goto out; 1420 goto out;
1399 1421
1400 mdb = mlock_dereference(br->mdb, br); 1422 mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
1402 if (!mp) 1424 if (!mp)
1403 goto out; 1425 goto out;
1404 1426
1427 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1428 struct net_bridge_port_group __rcu **pp;
1429
1430 for (pp = &mp->ports;
1431 (p = mlock_dereference(*pp, br)) != NULL;
1432 pp = &p->next) {
1433 if (p->port != port)
1434 continue;
1435
1436 rcu_assign_pointer(*pp, p->next);
1437 hlist_del_init(&p->mglist);
1438 del_timer(&p->timer);
1439 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1440 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1441
1442 if (!mp->ports && !mp->mglist &&
1443 netif_running(br->dev))
1444 mod_timer(&mp->timer, jiffies);
1445 }
1446 goto out;
1447 }
1448
1449 if (timer_pending(&other_query->timer))
1450 goto out;
1451
1405 if (br->multicast_querier) { 1452 if (br->multicast_querier) {
1406 __br_multicast_send_query(br, port, &mp->addr); 1453 __br_multicast_send_query(br, port, &mp->addr);
1407 1454
@@ -1427,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
1427 } 1474 }
1428 } 1475 }
1429 1476
1430 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1431 struct net_bridge_port_group __rcu **pp;
1432
1433 for (pp = &mp->ports;
1434 (p = mlock_dereference(*pp, br)) != NULL;
1435 pp = &p->next) {
1436 if (p->port != port)
1437 continue;
1438
1439 rcu_assign_pointer(*pp, p->next);
1440 hlist_del_init(&p->mglist);
1441 del_timer(&p->timer);
1442 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1443 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1444
1445 if (!mp->ports && !mp->mglist &&
1446 netif_running(br->dev))
1447 mod_timer(&mp->timer, jiffies);
1448 }
1449 goto out;
1450 }
1451
1452 now = jiffies; 1477 now = jiffies;
1453 time = now + br->multicast_last_member_count * 1478 time = now + br->multicast_last_member_count *
1454 br->multicast_last_member_interval; 1479 br->multicast_last_member_interval;
@@ -1566,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1566 break; 1591 break;
1567 } 1592 }
1568 1593
1569 if (skb_trimmed) 1594 if (skb_trimmed && skb_trimmed != skb)
1570 kfree_skb(skb_trimmed); 1595 kfree_skb(skb_trimmed);
1571 1596
1572 return err; 1597 return err;
@@ -1611,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1611 break; 1636 break;
1612 } 1637 }
1613 1638
1614 if (skb_trimmed) 1639 if (skb_trimmed && skb_trimmed != skb)
1615 kfree_skb(skb_trimmed); 1640 kfree_skb(skb_trimmed);
1616 1641
1617 return err; 1642 return err;
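The IGMPv3 and MLD2 report parsers above now treat a CHANGE_TO_INCLUDE or MODE_IS_INCLUDE record with zero sources as a leave instead of a group add, and the fast-leave handling moves ahead of the other-querier timer check so it still runs while another querier is present. A minimal userspace sketch of the new record classification; the constants mirror the IGMPv3 names but are illustrative here.

#include <stdbool.h>
#include <stdio.h>

enum rec_type {
        MODE_IS_INCLUDE   = 1,
        CHANGE_TO_INCLUDE = 3,
        CHANGE_TO_EXCLUDE = 4,
};

static bool record_is_leave(enum rec_type type, unsigned int nsrcs)
{
        return (type == CHANGE_TO_INCLUDE || type == MODE_IS_INCLUDE) && nsrcs == 0;
}

int main(void)
{
        printf("%d\n", record_is_leave(CHANGE_TO_INCLUDE, 0)); /* leave the group */
        printf("%d\n", record_is_leave(CHANGE_TO_EXCLUDE, 0)); /* join/refresh */
        return 0;
}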
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d89f4fac0bc5..c8b9bcfe997e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -111,7 +111,7 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
111/* largest possible L2 header, see br_nf_dev_queue_xmit() */ 111/* largest possible L2 header, see br_nf_dev_queue_xmit() */
112#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN) 112#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
113 113
114#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 114#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
115struct brnf_frag_data { 115struct brnf_frag_data {
116 char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH]; 116 char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
117 u8 encap_size; 117 u8 encap_size;
@@ -694,6 +694,7 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
694} 694}
695#endif 695#endif
696 696
697#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
697static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb, 698static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
698 int (*output)(struct sock *, struct sk_buff *)) 699 int (*output)(struct sock *, struct sk_buff *))
699{ 700{
@@ -712,6 +713,7 @@ static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
712 713
713 return ip_do_fragment(sk, skb, output); 714 return ip_do_fragment(sk, skb, output);
714} 715}
716#endif
715 717
716static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) 718static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
717{ 719{
@@ -742,7 +744,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
742 struct brnf_frag_data *data; 744 struct brnf_frag_data *data;
743 745
744 if (br_validate_ipv4(skb)) 746 if (br_validate_ipv4(skb))
745 return NF_DROP; 747 goto drop;
746 748
747 IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; 749 IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
748 750
@@ -767,7 +769,7 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
767 struct brnf_frag_data *data; 769 struct brnf_frag_data *data;
768 770
769 if (br_validate_ipv6(skb)) 771 if (br_validate_ipv6(skb))
770 return NF_DROP; 772 goto drop;
771 773
772 IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; 774 IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
773 775
@@ -782,12 +784,16 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
782 784
783 if (v6ops) 785 if (v6ops)
784 return v6ops->fragment(sk, skb, br_nf_push_frag_xmit); 786 return v6ops->fragment(sk, skb, br_nf_push_frag_xmit);
785 else 787
786 return -EMSGSIZE; 788 kfree_skb(skb);
789 return -EMSGSIZE;
787 } 790 }
788#endif 791#endif
789 nf_bridge_info_free(skb); 792 nf_bridge_info_free(skb);
790 return br_dev_queue_push_xmit(sk, skb); 793 return br_dev_queue_push_xmit(sk, skb);
794 drop:
795 kfree_skb(skb);
796 return 0;
791} 797}
792 798
793/* PF_BRIDGE/POST_ROUTING ********************************************/ 799/* PF_BRIDGE/POST_ROUTING ********************************************/
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 6d12d2675c80..13b7d1e3d185 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -104,7 +104,7 @@ int br_validate_ipv6(struct sk_buff *skb)
104{ 104{
105 const struct ipv6hdr *hdr; 105 const struct ipv6hdr *hdr;
106 struct net_device *dev = skb->dev; 106 struct net_device *dev = skb->dev;
107 struct inet6_dev *idev = in6_dev_get(skb->dev); 107 struct inet6_dev *idev = __in6_dev_get(skb->dev);
108 u32 pkt_len; 108 u32 pkt_len;
109 u8 ip6h_len = sizeof(struct ipv6hdr); 109 u8 ip6h_len = sizeof(struct ipv6hdr);
110 110
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6b67ed3831de..4d74a0639c4c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ 112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ 113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ 114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
115 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
116 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
115 + 0; 117 + 0;
116} 118}
117 119
@@ -457,6 +459,8 @@ static int br_afspec(struct net_bridge *br,
457 if (nla_len(attr) != sizeof(struct bridge_vlan_info)) 459 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
458 return -EINVAL; 460 return -EINVAL;
459 vinfo = nla_data(attr); 461 vinfo = nla_data(attr);
462 if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
463 return -EINVAL;
460 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { 464 if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
461 if (vinfo_start) 465 if (vinfo_start)
462 return -EINVAL; 466 return -EINVAL;
@@ -504,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
504 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, 508 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
505 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 509 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
506 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 510 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
511 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
512 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
507}; 513};
508 514
509/* Change the state of the port and notify spanning tree */ 515/* Change the state of the port and notify spanning tree */
@@ -691,9 +697,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
691 struct nlattr *tb[], 697 struct nlattr *tb[],
692 struct nlattr *data[]) 698 struct nlattr *data[])
693{ 699{
700 struct net_bridge *br = netdev_priv(brdev);
701 int ret;
702
694 if (!data) 703 if (!data)
695 return 0; 704 return 0;
696 return br_setport(br_port_get_rtnl(dev), data); 705
706 spin_lock_bh(&br->lock);
707 ret = br_setport(br_port_get_rtnl(dev), data);
708 spin_unlock_bh(&br->lock);
709
710 return ret;
697} 711}
698 712
699static int br_port_fill_slave_info(struct sk_buff *skb, 713static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
209 br_send_config_bpdu(p, &bpdu); 209 br_send_config_bpdu(p, &bpdu);
210 p->topology_change_ack = 0; 210 p->topology_change_ack = 0;
211 p->config_pending = 0; 211 p->config_pending = 0;
212 mod_timer(&p->hold_timer, 212 if (p->br->stp_enabled == BR_KERNEL_STP)
213 round_jiffies(jiffies + BR_HOLD_TIME)); 213 mod_timer(&p->hold_timer,
214 round_jiffies(jiffies + BR_HOLD_TIME));
214 } 215 }
215} 216}
216 217
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
48 struct net_bridge_port *p; 48 struct net_bridge_port *p;
49 49
50 spin_lock_bh(&br->lock); 50 spin_lock_bh(&br->lock);
51 mod_timer(&br->hello_timer, jiffies + br->hello_time); 51 if (br->stp_enabled == BR_KERNEL_STP)
52 mod_timer(&br->hello_timer, jiffies + br->hello_time);
52 mod_timer(&br->gc_timer, jiffies + HZ/10); 53 mod_timer(&br->gc_timer, jiffies + HZ/10);
53 54
54 br_config_bpdu_generation(br); 55 br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
127 int r; 128 int r;
128 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; 129 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
129 char *envp[] = { NULL }; 130 char *envp[] = { NULL };
131 struct net_bridge_port *p;
130 132
131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 133 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
132 134
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
140 if (r == 0) { 142 if (r == 0) {
141 br->stp_enabled = BR_USER_STP; 143 br->stp_enabled = BR_USER_STP;
142 br_debug(br, "userspace STP started\n"); 144 br_debug(br, "userspace STP started\n");
145 /* Stop hello and hold timers */
146 del_timer(&br->hello_timer);
147 list_for_each_entry(p, &br->port_list, list)
148 del_timer(&p->hold_timer);
143 } else { 149 } else {
144 br->stp_enabled = BR_KERNEL_STP; 150 br->stp_enabled = BR_KERNEL_STP;
145 br_debug(br, "using kernel STP\n"); 151 br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
156 int r; 162 int r;
157 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL }; 163 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
158 char *envp[] = { NULL }; 164 char *envp[] = { NULL };
165 struct net_bridge_port *p;
159 166
160 if (br->stp_enabled == BR_USER_STP) { 167 if (br->stp_enabled == BR_USER_STP) {
161 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 168 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
162 br_info(br, "userspace STP stopped, return code %d\n", r); 169 br_info(br, "userspace STP stopped, return code %d\n", r);
163 170
164 /* To start timers on any ports left in blocking */ 171 /* To start timers on any ports left in blocking */
172 mod_timer(&br->hello_timer, jiffies + br->hello_time);
173 list_for_each_entry(p, &br->port_list, list)
174 mod_timer(&p->hold_timer,
175 round_jiffies(jiffies + BR_HOLD_TIME));
165 spin_lock_bh(&br->lock); 176 spin_lock_bh(&br->lock);
166 br_port_state_selection(br); 177 br_port_state_selection(br);
167 spin_unlock_bh(&br->lock); 178 spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); 43 if (br->stp_enabled != BR_USER_STP)
44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time));
44 } 46 }
45 spin_unlock(&br->lock); 47 spin_unlock(&br->lock);
46} 48}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3cc71b9f5517..cc858919108e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
122 * not dropped, but CAIF is sending flow off instead. 122 * not dropped, but CAIF is sending flow off instead.
123 */ 123 */
124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
125{ 125{
126 int err; 126 int err;
127 unsigned long flags; 127 unsigned long flags;
128 struct sk_buff_head *list = &sk->sk_receive_queue; 128 struct sk_buff_head *list = &sk->sk_receive_queue;
129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
130 bool queued = false;
130 131
131 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
132 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
139 140
140 err = sk_filter(sk, skb); 141 err = sk_filter(sk, skb);
141 if (err) 142 if (err)
142 return err; 143 goto out;
144
143 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { 145 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
144 set_rx_flow_off(cf_sk); 146 set_rx_flow_off(cf_sk);
145 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); 147 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
147 } 149 }
148 skb->dev = NULL; 150 skb->dev = NULL;
149 skb_set_owner_r(skb, sk); 151 skb_set_owner_r(skb, sk);
150 /* Cache the SKB length before we tack it onto the receive
151 * queue. Once it is added it no longer belongs to us and
152 * may be freed by other threads of control pulling packets
153 * from the queue.
154 */
155 spin_lock_irqsave(&list->lock, flags); 152 spin_lock_irqsave(&list->lock, flags);
156 if (!sock_flag(sk, SOCK_DEAD)) 153 queued = !sock_flag(sk, SOCK_DEAD);
154 if (queued)
157 __skb_queue_tail(list, skb); 155 __skb_queue_tail(list, skb);
158 spin_unlock_irqrestore(&list->lock, flags); 156 spin_unlock_irqrestore(&list->lock, flags);
159 157out:
160 if (!sock_flag(sk, SOCK_DEAD)) 158 if (queued)
161 sk->sk_data_ready(sk); 159 sk->sk_data_ready(sk);
162 else 160 else
163 kfree_skb(skb); 161 kfree_skb(skb);
164 return 0;
165} 162}
166 163
167/* Packet Receive Callback function called from CAIF Stack */ 164/* Packet Receive Callback function called from CAIF Stack */
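caif_queue_rcv_skb() above becomes void and records a single queued decision while holding the queue lock, so the later choice between waking the reader and freeing the skb cannot disagree with whether the packet was actually queued. A userspace sketch of that decide-once-under-the-lock pattern (compile with -pthread); every name here is made up.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool sock_dead;

static void deliver(int pkt)
{
        bool queued;

        pthread_mutex_lock(&lock);
        queued = !sock_dead;            /* single authoritative test */
        if (queued)
                printf("queued packet %d\n", pkt);
        pthread_mutex_unlock(&lock);

        if (queued)
                printf("wake reader\n");         /* like sk_data_ready() */
        else
                printf("drop packet %d\n", pkt); /* like kfree_skb() */
}

int main(void)
{
        deliver(1);
        sock_dead = true;
        deliver(2);
        return 0;
}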
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7933e62a7318..166d436196c1 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -89,6 +89,8 @@ struct timer_list can_stattimer; /* timer for statistics update */
89struct s_stats can_stats; /* packet statistics */ 89struct s_stats can_stats; /* packet statistics */
90struct s_pstats can_pstats; /* receive list statistics */ 90struct s_pstats can_pstats; /* receive list statistics */
91 91
92static atomic_t skbcounter = ATOMIC_INIT(0);
93
92/* 94/*
93 * af_can socket functions 95 * af_can socket functions
94 */ 96 */
@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
310 return err; 312 return err;
311 } 313 }
312 314
313 if (newskb) { 315 if (newskb)
314 if (!(newskb->tstamp.tv64))
315 __net_timestamp(newskb);
316
317 netif_rx_ni(newskb); 316 netif_rx_ni(newskb);
318 }
319 317
320 /* update statistics */ 318 /* update statistics */
321 can_stats.tx_frames++; 319 can_stats.tx_frames++;
@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
683 can_stats.rx_frames++; 681 can_stats.rx_frames++;
684 can_stats.rx_frames_delta++; 682 can_stats.rx_frames_delta++;
685 683
684 /* create non-zero unique skb identifier together with *skb */
685 while (!(can_skb_prv(skb)->skbcnt))
686 can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
687
686 rcu_read_lock(); 688 rcu_read_lock();
687 689
688 /* deliver the packet to sockets listening on all devices */ 690 /* deliver the packet to sockets listening on all devices */
diff --git a/net/can/bcm.c b/net/can/bcm.c
index b523453585be..a1ba6875c2a2 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
261 261
262 can_skb_reserve(skb); 262 can_skb_reserve(skb);
263 can_skb_prv(skb)->ifindex = dev->ifindex; 263 can_skb_prv(skb)->ifindex = dev->ifindex;
264 can_skb_prv(skb)->skbcnt = 0;
264 265
265 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); 266 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
266 267
@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1217 } 1218 }
1218 1219
1219 can_skb_prv(skb)->ifindex = dev->ifindex; 1220 can_skb_prv(skb)->ifindex = dev->ifindex;
1221 can_skb_prv(skb)->skbcnt = 0;
1220 skb->dev = dev; 1222 skb->dev = dev;
1221 can_skb_set_owner(skb, sk); 1223 can_skb_set_owner(skb, sk);
1222 err = can_send(skb, 1); /* send with loopback */ 1224 err = can_send(skb, 1); /* send with loopback */
diff --git a/net/can/raw.c b/net/can/raw.c
index 31b9748cbb4e..2e67b1423cd3 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
75 */ 75 */
76 76
77struct uniqframe { 77struct uniqframe {
78 ktime_t tstamp; 78 int skbcnt;
79 const struct sk_buff *skb; 79 const struct sk_buff *skb;
80 unsigned int join_rx_count; 80 unsigned int join_rx_count;
81}; 81};
@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
133 133
134 /* eliminate multiple filter matches for the same skb */ 134 /* eliminate multiple filter matches for the same skb */
135 if (this_cpu_ptr(ro->uniq)->skb == oskb && 135 if (this_cpu_ptr(ro->uniq)->skb == oskb &&
136 ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) { 136 this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
137 if (ro->join_filters) { 137 if (ro->join_filters) {
138 this_cpu_inc(ro->uniq->join_rx_count); 138 this_cpu_inc(ro->uniq->join_rx_count);
139 /* drop frame until all enabled filters matched */ 139 /* drop frame until all enabled filters matched */
@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
144 } 144 }
145 } else { 145 } else {
146 this_cpu_ptr(ro->uniq)->skb = oskb; 146 this_cpu_ptr(ro->uniq)->skb = oskb;
147 this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp; 147 this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
148 this_cpu_ptr(ro->uniq)->join_rx_count = 1; 148 this_cpu_ptr(ro->uniq)->join_rx_count = 1;
149 /* drop first frame to check all enabled filters? */ 149 /* drop first frame to check all enabled filters? */
150 if (ro->join_filters && ro->count > 1) 150 if (ro->join_filters && ro->count > 1)
@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
749 749
750 can_skb_reserve(skb); 750 can_skb_reserve(skb);
751 can_skb_prv(skb)->ifindex = dev->ifindex; 751 can_skb_prv(skb)->ifindex = dev->ifindex;
752 can_skb_prv(skb)->skbcnt = 0;
752 753
753 err = memcpy_from_msg(skb_put(skb, size), msg, size); 754 err = memcpy_from_msg(skb_put(skb, size), msg, size);
754 if (err < 0) 755 if (err < 0)
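The CAN hunks replace timestamp-based duplicate detection with a per-skb counter: can_receive() assigns a non-zero skbcnt from a global atomic counter, senders initialise the field to 0, and raw_rcv() compares that counter instead of ktime values when eliminating multiple filter matches for the same frame. A minimal single-threaded userspace sketch of the idea; atomicity and per-CPU state are deliberately left out.

#include <stdio.h>

static unsigned int skbcounter;

static unsigned int assign_id(unsigned int current_id)
{
        while (current_id == 0)                 /* keep 0 as "unassigned" */
                current_id = ++skbcounter;
        return current_id;
}

int main(void)
{
        unsigned int frame = 0;                 /* sender left skbcnt at 0 */
        unsigned int last_seen = 0;

        frame = assign_id(frame);
        printf("duplicate: %d\n", frame == last_seen);  /* first delivery */
        last_seen = frame;
        printf("duplicate: %d\n", frame == last_seen);  /* same frame again */
        return 0;
}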
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cb7db320dd27..f30329f72641 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -9,6 +9,7 @@
9#include <keys/ceph-type.h> 9#include <keys/ceph-type.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/mount.h> 11#include <linux/mount.h>
12#include <linux/nsproxy.h>
12#include <linux/parser.h> 13#include <linux/parser.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
14#include <linux/seq_file.h> 15#include <linux/seq_file.h>
@@ -16,8 +17,6 @@
16#include <linux/statfs.h> 17#include <linux/statfs.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
19#include <linux/nsproxy.h>
20#include <net/net_namespace.h>
21 20
22 21
23#include <linux/ceph/ceph_features.h> 22#include <linux/ceph/ceph_features.h>
@@ -131,6 +130,13 @@ int ceph_compare_options(struct ceph_options *new_opt,
131 int i; 130 int i;
132 int ret; 131 int ret;
133 132
133 /*
134 * Don't bother comparing options if network namespaces don't
135 * match.
136 */
137 if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net)))
138 return -1;
139
134 ret = memcmp(opt1, opt2, ofs); 140 ret = memcmp(opt1, opt2, ofs);
135 if (ret) 141 if (ret)
136 return ret; 142 return ret;
@@ -335,9 +341,6 @@ ceph_parse_options(char *options, const char *dev_name,
335 int err = -ENOMEM; 341 int err = -ENOMEM;
336 substring_t argstr[MAX_OPT_ARGS]; 342 substring_t argstr[MAX_OPT_ARGS];
337 343
338 if (current->nsproxy->net_ns != &init_net)
339 return ERR_PTR(-EINVAL);
340
341 opt = kzalloc(sizeof(*opt), GFP_KERNEL); 344 opt = kzalloc(sizeof(*opt), GFP_KERNEL);
342 if (!opt) 345 if (!opt)
343 return ERR_PTR(-ENOMEM); 346 return ERR_PTR(-ENOMEM);
@@ -608,6 +611,7 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
608fail_monc: 611fail_monc:
609 ceph_monc_stop(&client->monc); 612 ceph_monc_stop(&client->monc);
610fail: 613fail:
614 ceph_messenger_fini(&client->msgr);
611 kfree(client); 615 kfree(client);
612 return ERR_PTR(err); 616 return ERR_PTR(err);
613} 617}
@@ -621,8 +625,8 @@ void ceph_destroy_client(struct ceph_client *client)
621 625
622 /* unmount */ 626 /* unmount */
623 ceph_osdc_stop(&client->osdc); 627 ceph_osdc_stop(&client->osdc);
624
625 ceph_monc_stop(&client->monc); 628 ceph_monc_stop(&client->monc);
629 ceph_messenger_fini(&client->msgr);
626 630
627 ceph_debugfs_client_cleanup(client); 631 ceph_debugfs_client_cleanup(client);
628 632
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 1679f47280e2..e3be1d22a247 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -6,6 +6,7 @@
6#include <linux/inet.h> 6#include <linux/inet.h>
7#include <linux/kthread.h> 7#include <linux/kthread.h>
8#include <linux/net.h> 8#include <linux/net.h>
9#include <linux/nsproxy.h>
9#include <linux/slab.h> 10#include <linux/slab.h>
10#include <linux/socket.h> 11#include <linux/socket.h>
11#include <linux/string.h> 12#include <linux/string.h>
@@ -479,7 +480,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
479 int ret; 480 int ret;
480 481
481 BUG_ON(con->sock); 482 BUG_ON(con->sock);
482 ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family, 483 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
483 SOCK_STREAM, IPPROTO_TCP, &sock); 484 SOCK_STREAM, IPPROTO_TCP, &sock);
484 if (ret) 485 if (ret)
485 return ret; 486 return ret;
@@ -1731,17 +1732,17 @@ static int verify_hello(struct ceph_connection *con)
1731 1732
1732static bool addr_is_blank(struct sockaddr_storage *ss) 1733static bool addr_is_blank(struct sockaddr_storage *ss)
1733{ 1734{
1735 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
1736 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
1737
1734 switch (ss->ss_family) { 1738 switch (ss->ss_family) {
1735 case AF_INET: 1739 case AF_INET:
1736 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; 1740 return addr->s_addr == htonl(INADDR_ANY);
1737 case AF_INET6: 1741 case AF_INET6:
1738 return 1742 return ipv6_addr_any(addr6);
1739 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && 1743 default:
1740 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && 1744 return true;
1741 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1742 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1743 } 1745 }
1744 return false;
1745} 1746}
1746 1747
1747static int addr_port(struct sockaddr_storage *ss) 1748static int addr_port(struct sockaddr_storage *ss)
@@ -2944,11 +2945,18 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
2944 msgr->tcp_nodelay = tcp_nodelay; 2945 msgr->tcp_nodelay = tcp_nodelay;
2945 2946
2946 atomic_set(&msgr->stopping, 0); 2947 atomic_set(&msgr->stopping, 0);
2948 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
2947 2949
2948 dout("%s %p\n", __func__, msgr); 2950 dout("%s %p\n", __func__, msgr);
2949} 2951}
2950EXPORT_SYMBOL(ceph_messenger_init); 2952EXPORT_SYMBOL(ceph_messenger_init);
2951 2953
2954void ceph_messenger_fini(struct ceph_messenger *msgr)
2955{
2956 put_net(read_pnet(&msgr->net));
2957}
2958EXPORT_SYMBOL(ceph_messenger_fini);
2959
2952static void clear_standby(struct ceph_connection *con) 2960static void clear_standby(struct ceph_connection *con)
2953{ 2961{
2954 /* come back from STANDBY? */ 2962 /* come back from STANDBY? */
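addr_is_blank() above is rewritten to compare against htonl(INADDR_ANY), to use ipv6_addr_any(), and to treat unknown address families as blank. A userspace sketch with the plain sockets API; IN6_IS_ADDR_UNSPECIFIED() stands in for the kernel's ipv6_addr_any().

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static bool addr_is_blank(const struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ((const struct sockaddr_in *)ss)->sin_addr.s_addr ==
                       htonl(INADDR_ANY);
        case AF_INET6:
                return IN6_IS_ADDR_UNSPECIFIED(
                        &((const struct sockaddr_in6 *)ss)->sin6_addr);
        default:
                return true;
        }
}

int main(void)
{
        struct sockaddr_storage ss;

        memset(&ss, 0, sizeof(ss));
        ss.ss_family = AF_INET;                 /* 0.0.0.0 */
        printf("blank: %d\n", addr_is_blank(&ss));
        return 0;
}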
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b80fb91bb3f7..617088aee21d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
131 goto out; 131 goto out;
132} 132}
133 133
134static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
135{
136 struct sk_buff *nskb;
137
138 if (skb->peeked)
139 return skb;
140
141 /* We have to unshare an skb before modifying it. */
142 if (!skb_shared(skb))
143 goto done;
144
145 nskb = skb_clone(skb, GFP_ATOMIC);
146 if (!nskb)
147 return ERR_PTR(-ENOMEM);
148
149 skb->prev->next = nskb;
150 skb->next->prev = nskb;
151 nskb->prev = skb->prev;
152 nskb->next = skb->next;
153
154 consume_skb(skb);
155 skb = nskb;
156
157done:
158 skb->peeked = 1;
159
160 return skb;
161}
162
134/** 163/**
135 * __skb_recv_datagram - Receive a datagram skbuff 164 * __skb_recv_datagram - Receive a datagram skbuff
136 * @sk: socket 165 * @sk: socket
@@ -165,7 +194,9 @@ out_noerr:
165struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, 194struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
166 int *peeked, int *off, int *err) 195 int *peeked, int *off, int *err)
167{ 196{
197 struct sk_buff_head *queue = &sk->sk_receive_queue;
168 struct sk_buff *skb, *last; 198 struct sk_buff *skb, *last;
199 unsigned long cpu_flags;
169 long timeo; 200 long timeo;
170 /* 201 /*
171 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() 202 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
184 * Look at current nfs client by the way... 215 * Look at current nfs client by the way...
185 * However, this function was correct in any case. 8) 216 * However, this function was correct in any case. 8)
186 */ 217 */
187 unsigned long cpu_flags;
188 struct sk_buff_head *queue = &sk->sk_receive_queue;
189 int _off = *off; 218 int _off = *off;
190 219
191 last = (struct sk_buff *)queue; 220 last = (struct sk_buff *)queue;
@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
199 _off -= skb->len; 228 _off -= skb->len;
200 continue; 229 continue;
201 } 230 }
202 skb->peeked = 1; 231
232 skb = skb_set_peeked(skb);
233 error = PTR_ERR(skb);
234 if (IS_ERR(skb))
235 goto unlock_err;
236
203 atomic_inc(&skb->users); 237 atomic_inc(&skb->users);
204 } else 238 } else
205 __skb_unlink(skb, queue); 239 __skb_unlink(skb, queue);
@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
223 257
224 return NULL; 258 return NULL;
225 259
260unlock_err:
261 spin_unlock_irqrestore(&queue->lock, cpu_flags);
226no_packet: 262no_packet:
227 *err = error; 263 *err = error;
228 return NULL; 264 return NULL;
@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
622 !skb->csum_complete_sw) 658 !skb->csum_complete_sw)
623 netdev_rx_csum_fault(skb->dev); 659 netdev_rx_csum_fault(skb->dev);
624 } 660 }
625 skb->csum_valid = !sum; 661 if (!skb_shared(skb))
662 skb->csum_valid = !sum;
626 return sum; 663 return sum;
627} 664}
628EXPORT_SYMBOL(__skb_checksum_complete_head); 665EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
642 netdev_rx_csum_fault(skb->dev); 679 netdev_rx_csum_fault(skb->dev);
643 } 680 }
644 681
645 /* Save full packet checksum */ 682 if (!skb_shared(skb)) {
646 skb->csum = csum; 683 /* Save full packet checksum */
647 skb->ip_summed = CHECKSUM_COMPLETE; 684 skb->csum = csum;
648 skb->csum_complete_sw = 1; 685 skb->ip_summed = CHECKSUM_COMPLETE;
649 skb->csum_valid = !sum; 686 skb->csum_complete_sw = 1;
687 skb->csum_valid = !sum;
688 }
650 689
651 return sum; 690 return sum;
652} 691}
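
The skb_set_peeked() hunk above is an instance of a general unshare-before-modify rule: a buffer that may still be referenced elsewhere must be replaced by a private clone, spliced into the queue at the same position, before any flag on it is changed. A minimal userspace sketch of that rule follows; struct buf, buf_clone() and the refcount handling are illustrative stand-ins, not kernel API.

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        struct buf *prev, *next;
        int refcnt;             /* >1 means the buffer is shared */
        int peeked;
        char data[64];
    };

    static struct buf *buf_clone(const struct buf *b)
    {
        struct buf *nb = malloc(sizeof(*nb));

        if (nb) {
            memcpy(nb, b, sizeof(*nb));
            nb->refcnt = 1;     /* the clone starts out private */
        }
        return nb;
    }

    /* Returns the (possibly replacement) node, or NULL on allocation failure. */
    static struct buf *buf_set_peeked(struct buf *b)
    {
        struct buf *nb;

        if (b->peeked)
            return b;
        if (b->refcnt == 1)
            goto done;          /* private: safe to modify in place */

        nb = buf_clone(b);
        if (!nb)
            return NULL;

        /* Splice the clone into the list where the shared node sat. */
        b->prev->next = nb;
        b->next->prev = nb;
        nb->prev = b->prev;
        nb->next = b->next;

        b->refcnt--;            /* drop our reference to the shared node */
        b = nb;
    done:
        b->peeked = 1;
        return b;
    }

The splice keeps the queue ordering intact, so a later non-peek receive still dequeues the same logical packet.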
diff --git a/net/core/dev.c b/net/core/dev.c
index 6778a9999d52..a8e4dd430285 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -677,10 +677,6 @@ int dev_get_iflink(const struct net_device *dev)
677 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) 677 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
678 return dev->netdev_ops->ndo_get_iflink(dev); 678 return dev->netdev_ops->ndo_get_iflink(dev);
679 679
680 /* If dev->rtnl_link_ops is set, it's a virtual interface. */
681 if (dev->rtnl_link_ops)
682 return 0;
683
684 return dev->ifindex; 680 return dev->ifindex;
685} 681}
686EXPORT_SYMBOL(dev_get_iflink); 682EXPORT_SYMBOL(dev_get_iflink);
@@ -3452,6 +3448,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3452 local_irq_save(flags); 3448 local_irq_save(flags);
3453 3449
3454 rps_lock(sd); 3450 rps_lock(sd);
3451 if (!netif_running(skb->dev))
3452 goto drop;
3455 qlen = skb_queue_len(&sd->input_pkt_queue); 3453 qlen = skb_queue_len(&sd->input_pkt_queue);
3456 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { 3454 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3457 if (qlen) { 3455 if (qlen) {
@@ -3473,6 +3471,7 @@ enqueue:
3473 goto enqueue; 3471 goto enqueue;
3474 } 3472 }
3475 3473
3474drop:
3476 sd->dropped++; 3475 sd->dropped++;
3477 rps_unlock(sd); 3476 rps_unlock(sd);
3478 3477
@@ -3775,8 +3774,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3775 3774
3776 pt_prev = NULL; 3775 pt_prev = NULL;
3777 3776
3778 rcu_read_lock();
3779
3780another_round: 3777another_round:
3781 skb->skb_iif = skb->dev->ifindex; 3778 skb->skb_iif = skb->dev->ifindex;
3782 3779
@@ -3786,7 +3783,7 @@ another_round:
3786 skb->protocol == cpu_to_be16(ETH_P_8021AD)) { 3783 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3787 skb = skb_vlan_untag(skb); 3784 skb = skb_vlan_untag(skb);
3788 if (unlikely(!skb)) 3785 if (unlikely(!skb))
3789 goto unlock; 3786 goto out;
3790 } 3787 }
3791 3788
3792#ifdef CONFIG_NET_CLS_ACT 3789#ifdef CONFIG_NET_CLS_ACT
@@ -3816,10 +3813,10 @@ skip_taps:
3816 if (static_key_false(&ingress_needed)) { 3813 if (static_key_false(&ingress_needed)) {
3817 skb = handle_ing(skb, &pt_prev, &ret, orig_dev); 3814 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3818 if (!skb) 3815 if (!skb)
3819 goto unlock; 3816 goto out;
3820 3817
3821 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) 3818 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
3822 goto unlock; 3819 goto out;
3823 } 3820 }
3824#endif 3821#endif
3825#ifdef CONFIG_NET_CLS_ACT 3822#ifdef CONFIG_NET_CLS_ACT
@@ -3837,7 +3834,7 @@ ncls:
3837 if (vlan_do_receive(&skb)) 3834 if (vlan_do_receive(&skb))
3838 goto another_round; 3835 goto another_round;
3839 else if (unlikely(!skb)) 3836 else if (unlikely(!skb))
3840 goto unlock; 3837 goto out;
3841 } 3838 }
3842 3839
3843 rx_handler = rcu_dereference(skb->dev->rx_handler); 3840 rx_handler = rcu_dereference(skb->dev->rx_handler);
@@ -3849,7 +3846,7 @@ ncls:
3849 switch (rx_handler(&skb)) { 3846 switch (rx_handler(&skb)) {
3850 case RX_HANDLER_CONSUMED: 3847 case RX_HANDLER_CONSUMED:
3851 ret = NET_RX_SUCCESS; 3848 ret = NET_RX_SUCCESS;
3852 goto unlock; 3849 goto out;
3853 case RX_HANDLER_ANOTHER: 3850 case RX_HANDLER_ANOTHER:
3854 goto another_round; 3851 goto another_round;
3855 case RX_HANDLER_EXACT: 3852 case RX_HANDLER_EXACT:
@@ -3903,8 +3900,7 @@ drop:
3903 ret = NET_RX_DROP; 3900 ret = NET_RX_DROP;
3904 } 3901 }
3905 3902
3906unlock: 3903out:
3907 rcu_read_unlock();
3908 return ret; 3904 return ret;
3909} 3905}
3910 3906
@@ -3935,29 +3931,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
3935 3931
3936static int netif_receive_skb_internal(struct sk_buff *skb) 3932static int netif_receive_skb_internal(struct sk_buff *skb)
3937{ 3933{
3934 int ret;
3935
3938 net_timestamp_check(netdev_tstamp_prequeue, skb); 3936 net_timestamp_check(netdev_tstamp_prequeue, skb);
3939 3937
3940 if (skb_defer_rx_timestamp(skb)) 3938 if (skb_defer_rx_timestamp(skb))
3941 return NET_RX_SUCCESS; 3939 return NET_RX_SUCCESS;
3942 3940
3941 rcu_read_lock();
3942
3943#ifdef CONFIG_RPS 3943#ifdef CONFIG_RPS
3944 if (static_key_false(&rps_needed)) { 3944 if (static_key_false(&rps_needed)) {
3945 struct rps_dev_flow voidflow, *rflow = &voidflow; 3945 struct rps_dev_flow voidflow, *rflow = &voidflow;
3946 int cpu, ret; 3946 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
3947
3948 rcu_read_lock();
3949
3950 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3951 3947
3952 if (cpu >= 0) { 3948 if (cpu >= 0) {
3953 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); 3949 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3954 rcu_read_unlock(); 3950 rcu_read_unlock();
3955 return ret; 3951 return ret;
3956 } 3952 }
3957 rcu_read_unlock();
3958 } 3953 }
3959#endif 3954#endif
3960 return __netif_receive_skb(skb); 3955 ret = __netif_receive_skb(skb);
3956 rcu_read_unlock();
3957 return ret;
3961} 3958}
3962 3959
3963/** 3960/**
@@ -4502,8 +4499,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
4502 struct sk_buff *skb; 4499 struct sk_buff *skb;
4503 4500
4504 while ((skb = __skb_dequeue(&sd->process_queue))) { 4501 while ((skb = __skb_dequeue(&sd->process_queue))) {
4502 rcu_read_lock();
4505 local_irq_enable(); 4503 local_irq_enable();
4506 __netif_receive_skb(skb); 4504 __netif_receive_skb(skb);
4505 rcu_read_unlock();
4507 local_irq_disable(); 4506 local_irq_disable();
4508 input_queue_head_incr(sd); 4507 input_queue_head_incr(sd);
4509 if (++work >= quota) { 4508 if (++work >= quota) {
@@ -6139,6 +6138,7 @@ static void rollback_registered_many(struct list_head *head)
6139 unlist_netdevice(dev); 6138 unlist_netdevice(dev);
6140 6139
6141 dev->reg_state = NETREG_UNREGISTERING; 6140 dev->reg_state = NETREG_UNREGISTERING;
6141 on_each_cpu(flush_backlog, dev, 1);
6142 } 6142 }
6143 6143
6144 synchronize_net(); 6144 synchronize_net();
@@ -6409,7 +6409,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
6409 struct netdev_queue *tx; 6409 struct netdev_queue *tx;
6410 size_t sz = count * sizeof(*tx); 6410 size_t sz = count * sizeof(*tx);
6411 6411
6412 BUG_ON(count < 1 || count > 0xffff); 6412 if (count < 1 || count > 0xffff)
6413 return -EINVAL;
6413 6414
6414 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); 6415 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6415 if (!tx) { 6416 if (!tx) {
@@ -6773,8 +6774,6 @@ void netdev_run_todo(void)
6773 6774
6774 dev->reg_state = NETREG_UNREGISTERED; 6775 dev->reg_state = NETREG_UNREGISTERED;
6775 6776
6776 on_each_cpu(flush_backlog, dev, 1);
6777
6778 netdev_wait_allrefs(dev); 6777 netdev_wait_allrefs(dev);
6779 6778
6780 /* paranoia */ 6779 /* paranoia */
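
Several hunks in net/core/dev.c above move rcu_read_lock()/rcu_read_unlock() out of __netif_receive_skb_core() and into its callers, so a single read-side critical section covers the whole receive path. A small pthreads sketch of the same lock-scoping idea, with a rwlock standing in for RCU and all names illustrative:

    #include <pthread.h>

    static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int table_value;

    static void update_table(int v)
    {
        pthread_rwlock_wrlock(&table_lock);
        table_value = v;
        pthread_rwlock_unlock(&table_lock);
    }

    /* Must be called with table_lock held for reading. */
    static int worker_locked(void)
    {
        return table_value;     /* safe: caller holds the read lock */
    }

    static int process_one(void)
    {
        int ret;

        pthread_rwlock_rdlock(&table_lock);   /* one lock for the whole path */
        ret = worker_locked();
        pthread_rwlock_unlock(&table_lock);
        return ret;
    }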
diff --git a/net/core/dst.c b/net/core/dst.c
index e956ce6d1378..002144bea935 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
284 int newrefcnt; 284 int newrefcnt;
285 285
286 newrefcnt = atomic_dec_return(&dst->__refcnt); 286 newrefcnt = atomic_dec_return(&dst->__refcnt);
287 WARN_ON(newrefcnt < 0); 287 if (unlikely(newrefcnt < 0))
288 net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
289 __func__, dst, newrefcnt);
288 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 290 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
289 call_rcu(&dst->rcu_head, dst_destroy_rcu); 291 call_rcu(&dst->rcu_head, dst_destroy_rcu);
290 } 292 }
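
The dst_release() hunk replaces an unconditional WARN_ON() with a rate-limited message, so a refcount-underflow storm cannot flood the log. A userspace sketch of a rate-limited warning, with an arbitrary one-second interval chosen purely for illustration:

    #include <stdio.h>
    #include <time.h>

    static void warn_ratelimited(const char *msg)
    {
        static time_t last;
        time_t now = time(NULL);

        if (now == (time_t)-1 || now - last < 1)
            return;             /* drop warnings emitted too close together */
        last = now;
        fprintf(stderr, "warning: %s\n", msg);
    }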
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9dfb88a933e7..92d886f4adcb 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,7 +66,7 @@
66 66
67 NOTES. 67 NOTES.
68 68
69 * avbps is scaled by 2^5, avpps is scaled by 2^10. 69 * avbps and avpps are scaled by 2^5.
70 * both values are reported as 32 bit unsigned values. bps can 70 * both values are reported as 32 bit unsigned values. bps can
71 overflow for fast links : max speed being 34360Mbit/sec 71 overflow for fast links : max speed being 34360Mbit/sec
72 * Minimal interval is HZ/4=250msec (it is the greatest common divisor 72 * Minimal interval is HZ/4=250msec (it is the greatest common divisor
@@ -85,10 +85,10 @@ struct gen_estimator
85 struct gnet_stats_rate_est64 *rate_est; 85 struct gnet_stats_rate_est64 *rate_est;
86 spinlock_t *stats_lock; 86 spinlock_t *stats_lock;
87 int ewma_log; 87 int ewma_log;
88 u32 last_packets;
89 unsigned long avpps;
88 u64 last_bytes; 90 u64 last_bytes;
89 u64 avbps; 91 u64 avbps;
90 u32 last_packets;
91 u32 avpps;
92 struct rcu_head e_rcu; 92 struct rcu_head e_rcu;
93 struct rb_node node; 93 struct rb_node node;
94 struct gnet_stats_basic_cpu __percpu *cpu_bstats; 94 struct gnet_stats_basic_cpu __percpu *cpu_bstats;
@@ -118,8 +118,8 @@ static void est_timer(unsigned long arg)
118 rcu_read_lock(); 118 rcu_read_lock();
119 list_for_each_entry_rcu(e, &elist[idx].list, list) { 119 list_for_each_entry_rcu(e, &elist[idx].list, list) {
120 struct gnet_stats_basic_packed b = {0}; 120 struct gnet_stats_basic_packed b = {0};
121 unsigned long rate;
121 u64 brate; 122 u64 brate;
122 u32 rate;
123 123
124 spin_lock(e->stats_lock); 124 spin_lock(e->stats_lock);
125 read_lock(&est_lock); 125 read_lock(&est_lock);
@@ -133,10 +133,11 @@ static void est_timer(unsigned long arg)
133 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log); 133 e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
134 e->rate_est->bps = (e->avbps+0xF)>>5; 134 e->rate_est->bps = (e->avbps+0xF)>>5;
135 135
136 rate = (b.packets - e->last_packets)<<(12 - idx); 136 rate = b.packets - e->last_packets;
137 rate <<= (7 - idx);
137 e->last_packets = b.packets; 138 e->last_packets = b.packets;
138 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log); 139 e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
139 e->rate_est->pps = (e->avpps+0x1FF)>>10; 140 e->rate_est->pps = (e->avpps + 0xF) >> 5;
140skip: 141skip:
141 read_unlock(&est_lock); 142 read_unlock(&est_lock);
142 spin_unlock(e->stats_lock); 143 spin_unlock(e->stats_lock);
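
The gen_estimator hunks keep avpps in the same 2^5 fixed-point scale as avbps and widen it to unsigned long, so the packet-rate EWMA no longer overflows or loses precision relative to the byte-rate one. A userspace sketch of that fixed-point EWMA update is below; ewma_log and idx are illustrative parameters (in the kernel they derive from the configured averaging interval).

    #include <stdint.h>

    struct est {
        uint64_t last_bytes;
        uint64_t avbps;         /* average bytes/sec, scaled by 2^5 */
        uint32_t last_packets;
        unsigned long avpps;    /* average packets/sec, scaled by 2^5 */
        int ewma_log;
    };

    static void est_update(struct est *e, uint64_t bytes, uint32_t packets, int idx)
    {
        uint64_t brate = (bytes - e->last_bytes) << (7 - idx);
        unsigned long prate =
            (unsigned long)(packets - e->last_packets) << (7 - idx);

        e->last_bytes = bytes;
        e->last_packets = packets;

        /* avg += (sample - avg) / 2^ewma_log, all in the same fixed point */
        e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
        e->avpps += (prate >> e->ewma_log) - (e->avpps >> e->ewma_log);
    }

    /* Reported rates round up out of the 2^5 fixed-point representation. */
    static uint64_t est_bps(const struct est *e) { return (e->avbps + 0xF) >> 5; }
    static unsigned long est_pps(const struct est *e) { return (e->avpps + 0xF) >> 5; }

Reading the averages back shifts the 2^5 scaling out again with round-up, matching the (e->avpps + 0xF) >> 5 expression in the patch.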
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
23 23
24struct cgroup_cls_state *task_cls_state(struct task_struct *p) 24struct cgroup_cls_state *task_cls_state(struct task_struct *p)
25{ 25{
26 return css_cls_state(task_css(p, net_cls_cgrp_id)); 26 return css_cls_state(task_css_check(p, net_cls_cgrp_id,
27 rcu_read_lock_bh_held()));
27} 28}
28EXPORT_SYMBOL_GPL(task_cls_state); 29EXPORT_SYMBOL_GPL(task_cls_state);
29 30
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 05badbb58865..1cbd209192ea 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
3514 3514
3515 set_freezable(); 3515 set_freezable();
3516 3516
3517 __set_current_state(TASK_RUNNING);
3518
3519 while (!kthread_should_stop()) { 3517 while (!kthread_should_stop()) {
3520 pkt_dev = next_to_run(t); 3518 pkt_dev = next_to_run(t);
3521 3519
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
3560 3558
3561 try_to_freeze(); 3559 try_to_freeze();
3562 } 3560 }
3563 set_current_state(TASK_INTERRUPTIBLE);
3564 3561
3565 pr_debug("%s stopping all device\n", t->tsk->comm); 3562 pr_debug("%s stopping all device\n", t->tsk->comm);
3566 pktgen_stop(t); 3563 pktgen_stop(t);
@@ -3571,13 +3568,6 @@ static int pktgen_thread_worker(void *arg)
3571 pr_debug("%s removing thread\n", t->tsk->comm); 3568 pr_debug("%s removing thread\n", t->tsk->comm);
3572 pktgen_rem_thread(t); 3569 pktgen_rem_thread(t);
3573 3570
3574 /* Wait for kthread_stop */
3575 while (!kthread_should_stop()) {
3576 set_current_state(TASK_INTERRUPTIBLE);
3577 schedule();
3578 }
3579 __set_current_state(TASK_RUNNING);
3580
3581 return 0; 3571 return 0;
3582} 3572}
3583 3573
@@ -3769,6 +3759,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3769 } 3759 }
3770 3760
3771 t->net = pn; 3761 t->net = pn;
3762 get_task_struct(p);
3772 wake_up_process(p); 3763 wake_up_process(p);
3773 wait_for_completion(&t->start_done); 3764 wait_for_completion(&t->start_done);
3774 3765
@@ -3891,6 +3882,7 @@ static void __net_exit pg_net_exit(struct net *net)
3891 t = list_entry(q, struct pktgen_thread, th_list); 3882 t = list_entry(q, struct pktgen_thread, th_list);
3892 list_del(&t->th_list); 3883 list_del(&t->th_list);
3893 kthread_stop(t->tsk); 3884 kthread_stop(t->tsk);
3885 put_task_struct(t->tsk);
3894 kfree(t); 3886 kfree(t);
3895 } 3887 }
3896 3888
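
The pktgen hunks take a reference on the kthread's task_struct before waking it and drop it only after kthread_stop(), so a worker that exits early cannot free the task while the creator still needs it for shutdown. The same reference-counting discipline, sketched in plain C with illustrative worker_get()/worker_put() helpers standing in for get_task_struct()/put_task_struct():

    #include <stdatomic.h>
    #include <stdlib.h>

    struct worker {
        atomic_int refcnt;
        /* ... per-worker state ... */
    };

    static struct worker *worker_get(struct worker *w)
    {
        atomic_fetch_add(&w->refcnt, 1);
        return w;
    }

    static void worker_put(struct worker *w)
    {
        if (atomic_fetch_sub(&w->refcnt, 1) == 1)
            free(w);            /* last reference dropped */
    }

The creator holds its own reference across startup and shutdown; the worker drops the reference it was born with when it exits, so whichever side finishes last frees the object.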
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0bc08c..b42f0e26f89e 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
103 spin_lock_bh(&queue->syn_wait_lock); 103 spin_lock_bh(&queue->syn_wait_lock);
104 while ((req = lopt->syn_table[i]) != NULL) { 104 while ((req = lopt->syn_table[i]) != NULL) {
105 lopt->syn_table[i] = req->dl_next; 105 lopt->syn_table[i] = req->dl_next;
106 /* Because of following del_timer_sync(),
107 * we must release the spinlock here
108 * or risk a dead lock.
109 */
110 spin_unlock_bh(&queue->syn_wait_lock);
106 atomic_inc(&lopt->qlen_dec); 111 atomic_inc(&lopt->qlen_dec);
107 if (del_timer(&req->rsk_timer)) 112 if (del_timer_sync(&req->rsk_timer))
108 reqsk_put(req); 113 reqsk_put(req);
109 reqsk_put(req); 114 reqsk_put(req);
115 spin_lock_bh(&queue->syn_wait_lock);
110 } 116 }
111 spin_unlock_bh(&queue->syn_wait_lock); 117 spin_unlock_bh(&queue->syn_wait_lock);
112 } 118 }
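
The reqsk_queue_destroy() hunk drops syn_wait_lock around del_timer_sync() because the timer handler itself takes that lock; waiting for it to finish while holding the lock would deadlock. A pthreads sketch of the same rule, with pthread_join() standing in for the synchronous timer cancellation and all names illustrative:

    #include <pthread.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *timer_fn(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&queue_lock);   /* the callback needs the lock */
        /* ... expire the request ... */
        pthread_mutex_unlock(&queue_lock);
        return NULL;
    }

    static void destroy_entry(pthread_t timer_thread)
    {
        pthread_mutex_lock(&queue_lock);
        /* ... unlink the entry under the lock ... */
        pthread_mutex_unlock(&queue_lock); /* must drop it before waiting */

        pthread_join(timer_thread, NULL);  /* analogous to del_timer_sync() */

        pthread_mutex_lock(&queue_lock);   /* re-take to continue the scan */
        /* ... */
        pthread_mutex_unlock(&queue_lock);
    }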
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 01ced4a889e0..dc004b1e1f85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1328,10 +1328,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
1328 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, 1328 [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED },
1329}; 1329};
1330 1330
1331static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
1332 [IFLA_VF_INFO] = { .type = NLA_NESTED },
1333};
1334
1335static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 1331static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
1336 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, 1332 [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
1337 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, 1333 [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
@@ -1488,96 +1484,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
1488 return 0; 1484 return 0;
1489} 1485}
1490 1486
1491static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) 1487static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
1492{ 1488{
1493 int rem, err = -EINVAL;
1494 struct nlattr *vf;
1495 const struct net_device_ops *ops = dev->netdev_ops; 1489 const struct net_device_ops *ops = dev->netdev_ops;
1490 int err = -EINVAL;
1496 1491
1497 nla_for_each_nested(vf, attr, rem) { 1492 if (tb[IFLA_VF_MAC]) {
1498 switch (nla_type(vf)) { 1493 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
1499 case IFLA_VF_MAC: {
1500 struct ifla_vf_mac *ivm;
1501 ivm = nla_data(vf);
1502 err = -EOPNOTSUPP;
1503 if (ops->ndo_set_vf_mac)
1504 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1505 ivm->mac);
1506 break;
1507 }
1508 case IFLA_VF_VLAN: {
1509 struct ifla_vf_vlan *ivv;
1510 ivv = nla_data(vf);
1511 err = -EOPNOTSUPP;
1512 if (ops->ndo_set_vf_vlan)
1513 err = ops->ndo_set_vf_vlan(dev, ivv->vf,
1514 ivv->vlan,
1515 ivv->qos);
1516 break;
1517 }
1518 case IFLA_VF_TX_RATE: {
1519 struct ifla_vf_tx_rate *ivt;
1520 struct ifla_vf_info ivf;
1521 ivt = nla_data(vf);
1522 err = -EOPNOTSUPP;
1523 if (ops->ndo_get_vf_config)
1524 err = ops->ndo_get_vf_config(dev, ivt->vf,
1525 &ivf);
1526 if (err)
1527 break;
1528 err = -EOPNOTSUPP;
1529 if (ops->ndo_set_vf_rate)
1530 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1531 ivf.min_tx_rate,
1532 ivt->rate);
1533 break;
1534 }
1535 case IFLA_VF_RATE: {
1536 struct ifla_vf_rate *ivt;
1537 ivt = nla_data(vf);
1538 err = -EOPNOTSUPP;
1539 if (ops->ndo_set_vf_rate)
1540 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1541 ivt->min_tx_rate,
1542 ivt->max_tx_rate);
1543 break;
1544 }
1545 case IFLA_VF_SPOOFCHK: {
1546 struct ifla_vf_spoofchk *ivs;
1547 ivs = nla_data(vf);
1548 err = -EOPNOTSUPP;
1549 if (ops->ndo_set_vf_spoofchk)
1550 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1551 ivs->setting);
1552 break;
1553 }
1554 case IFLA_VF_LINK_STATE: {
1555 struct ifla_vf_link_state *ivl;
1556 ivl = nla_data(vf);
1557 err = -EOPNOTSUPP;
1558 if (ops->ndo_set_vf_link_state)
1559 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1560 ivl->link_state);
1561 break;
1562 }
1563 case IFLA_VF_RSS_QUERY_EN: {
1564 struct ifla_vf_rss_query_en *ivrssq_en;
1565 1494
1566 ivrssq_en = nla_data(vf); 1495 err = -EOPNOTSUPP;
1567 err = -EOPNOTSUPP; 1496 if (ops->ndo_set_vf_mac)
1568 if (ops->ndo_set_vf_rss_query_en) 1497 err = ops->ndo_set_vf_mac(dev, ivm->vf,
1569 err = ops->ndo_set_vf_rss_query_en(dev, 1498 ivm->mac);
1570 ivrssq_en->vf, 1499 if (err < 0)
1571 ivrssq_en->setting); 1500 return err;
1572 break; 1501 }
1573 } 1502
1574 default: 1503 if (tb[IFLA_VF_VLAN]) {
1575 err = -EINVAL; 1504 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
1576 break; 1505
1577 } 1506 err = -EOPNOTSUPP;
1578 if (err) 1507 if (ops->ndo_set_vf_vlan)
1579 break; 1508 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
1509 ivv->qos);
1510 if (err < 0)
1511 return err;
1512 }
1513
1514 if (tb[IFLA_VF_TX_RATE]) {
1515 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
1516 struct ifla_vf_info ivf;
1517
1518 err = -EOPNOTSUPP;
1519 if (ops->ndo_get_vf_config)
1520 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
1521 if (err < 0)
1522 return err;
1523
1524 err = -EOPNOTSUPP;
1525 if (ops->ndo_set_vf_rate)
1526 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1527 ivf.min_tx_rate,
1528 ivt->rate);
1529 if (err < 0)
1530 return err;
1531 }
1532
1533 if (tb[IFLA_VF_RATE]) {
1534 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
1535
1536 err = -EOPNOTSUPP;
1537 if (ops->ndo_set_vf_rate)
1538 err = ops->ndo_set_vf_rate(dev, ivt->vf,
1539 ivt->min_tx_rate,
1540 ivt->max_tx_rate);
1541 if (err < 0)
1542 return err;
1543 }
1544
1545 if (tb[IFLA_VF_SPOOFCHK]) {
1546 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
1547
1548 err = -EOPNOTSUPP;
1549 if (ops->ndo_set_vf_spoofchk)
1550 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
1551 ivs->setting);
1552 if (err < 0)
1553 return err;
1580 } 1554 }
1555
1556 if (tb[IFLA_VF_LINK_STATE]) {
1557 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
1558
1559 err = -EOPNOTSUPP;
1560 if (ops->ndo_set_vf_link_state)
1561 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
1562 ivl->link_state);
1563 if (err < 0)
1564 return err;
1565 }
1566
1567 if (tb[IFLA_VF_RSS_QUERY_EN]) {
1568 struct ifla_vf_rss_query_en *ivrssq_en;
1569
1570 err = -EOPNOTSUPP;
1571 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
1572 if (ops->ndo_set_vf_rss_query_en)
1573 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
1574 ivrssq_en->setting);
1575 if (err < 0)
1576 return err;
1577 }
1578
1581 return err; 1579 return err;
1582} 1580}
1583 1581
@@ -1773,14 +1771,21 @@ static int do_setlink(const struct sk_buff *skb,
1773 } 1771 }
1774 1772
1775 if (tb[IFLA_VFINFO_LIST]) { 1773 if (tb[IFLA_VFINFO_LIST]) {
1774 struct nlattr *vfinfo[IFLA_VF_MAX + 1];
1776 struct nlattr *attr; 1775 struct nlattr *attr;
1777 int rem; 1776 int rem;
1777
1778 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 1778 nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
1779 if (nla_type(attr) != IFLA_VF_INFO) { 1779 if (nla_type(attr) != IFLA_VF_INFO ||
1780 nla_len(attr) < NLA_HDRLEN) {
1780 err = -EINVAL; 1781 err = -EINVAL;
1781 goto errout; 1782 goto errout;
1782 } 1783 }
1783 err = do_setvfinfo(dev, attr); 1784 err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
1785 ifla_vf_policy);
1786 if (err < 0)
1787 goto errout;
1788 err = do_setvfinfo(dev, vfinfo);
1784 if (err < 0) 1789 if (err < 0)
1785 goto errout; 1790 goto errout;
1786 status |= DO_SETLINK_NOTIFY; 1791 status |= DO_SETLINK_NOTIFY;
@@ -1799,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
1799 goto errout; 1804 goto errout;
1800 1805
1801 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
1802 if (nla_type(attr) != IFLA_VF_PORT) 1807 if (nla_type(attr) != IFLA_VF_PORT ||
1803 continue; 1808 nla_len(attr) < NLA_HDRLEN) {
1804 err = nla_parse_nested(port, IFLA_PORT_MAX, 1809 err = -EINVAL;
1805 attr, ifla_port_policy); 1810 goto errout;
1811 }
1812 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
1813 ifla_port_policy);
1806 if (err < 0) 1814 if (err < 0)
1807 goto errout; 1815 goto errout;
1808 if (!port[IFLA_PORT_VF]) { 1816 if (!port[IFLA_PORT_VF]) {
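
The rtnetlink hunks stop open-coding a walk over the nested IFLA_VF_INFO attributes and instead run nla_parse_nested() with ifla_vf_policy, then dispatch on a table indexed by attribute type. A userspace sketch of that parse-into-a-table pattern follows; the TLV header, attribute types and policy_minlen[] are made up for illustration and are not the netlink wire format.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    enum { ATTR_UNSPEC, ATTR_MAC, ATTR_VLAN, ATTR_MAX = ATTR_VLAN };

    struct tlv {            /* illustrative type-length header */
        uint16_t type;
        uint16_t len;       /* length of payload that follows */
    };

    static const uint16_t policy_minlen[ATTR_MAX + 1] = {
        [ATTR_MAC]  = 6,    /* needs at least a MAC address */
        [ATTR_VLAN] = 2,    /* needs at least a VLAN id */
    };

    /* Fills tb[type] with a pointer to each attribute's payload. */
    static int parse_nested(const uint8_t *buf, size_t len,
                            const uint8_t *tb[ATTR_MAX + 1])
    {
        memset(tb, 0, sizeof(const uint8_t *) * (ATTR_MAX + 1));

        while (len >= sizeof(struct tlv)) {
            struct tlv h;

            memcpy(&h, buf, sizeof(h));
            if (sizeof(h) + h.len > len)
                return -1;                  /* truncated attribute */
            if (h.type <= ATTR_MAX) {
                if (h.len < policy_minlen[h.type])
                    return -1;              /* violates the policy */
                tb[h.type] = buf + sizeof(h);
            }
            buf += sizeof(h) + h.len;
            len -= sizeof(h) + h.len;
        }
        return 0;
    }

After parsing, the caller simply checks tb[ATTR_MAC], tb[ATTR_VLAN] and so on, mirroring the tb[IFLA_VF_*] checks in the reworked do_setvfinfo().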
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca0f99e..7b84330e5d30 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
340 340
341 if (skb && frag_size) { 341 if (skb && frag_size) {
342 skb->head_frag = 1; 342 skb->head_frag = 1;
343 if (virt_to_head_page(data)->pfmemalloc) 343 if (page_is_pfmemalloc(virt_to_head_page(data)))
344 skb->pfmemalloc = 1; 344 skb->pfmemalloc = 1;
345 } 345 }
346 return skb; 346 return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
4022 * Otherwise returns the provided skb. Returns NULL in error cases 4022 * Otherwise returns the provided skb. Returns NULL in error cases
4023 * (e.g. transport_len exceeds skb length or out-of-memory). 4023 * (e.g. transport_len exceeds skb length or out-of-memory).
4024 * 4024 *
4025 * Caller needs to set the skb transport header and release the returned skb. 4025 * Caller needs to set the skb transport header and free any returned skb if it
4026 * Provided skb is consumed. 4026 * differs from the provided skb.
4027 */ 4027 */
4028static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4028static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4029 unsigned int transport_len) 4029 unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4032 unsigned int len = skb_transport_offset(skb) + transport_len; 4032 unsigned int len = skb_transport_offset(skb) + transport_len;
4033 int ret; 4033 int ret;
4034 4034
4035 if (skb->len < len) { 4035 if (skb->len < len)
4036 kfree_skb(skb);
4037 return NULL; 4036 return NULL;
4038 } else if (skb->len == len) { 4037 else if (skb->len == len)
4039 return skb; 4038 return skb;
4040 }
4041 4039
4042 skb_chk = skb_clone(skb, GFP_ATOMIC); 4040 skb_chk = skb_clone(skb, GFP_ATOMIC);
4043 kfree_skb(skb);
4044
4045 if (!skb_chk) 4041 if (!skb_chk)
4046 return NULL; 4042 return NULL;
4047 4043
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4066 * If the skb has data beyond the given transport length, then a 4062 * If the skb has data beyond the given transport length, then a
4067 * trimmed & cloned skb is checked and returned. 4063 * trimmed & cloned skb is checked and returned.
4068 * 4064 *
4069 * Caller needs to set the skb transport header and release the returned skb. 4065 * Caller needs to set the skb transport header and free any returned skb if it
4070 * Provided skb is consumed. 4066 * differs from the provided skb.
4071 */ 4067 */
4072struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 4068struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4073 unsigned int transport_len, 4069 unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4079 4075
4080 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 4076 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4081 if (!skb_chk) 4077 if (!skb_chk)
4082 return NULL; 4078 goto err;
4083 4079
4084 if (!pskb_may_pull(skb_chk, offset)) { 4080 if (!pskb_may_pull(skb_chk, offset))
4085 kfree_skb(skb_chk); 4081 goto err;
4086 return NULL;
4087 }
4088 4082
4089 __skb_pull(skb_chk, offset); 4083 __skb_pull(skb_chk, offset);
4090 ret = skb_chkf(skb_chk); 4084 ret = skb_chkf(skb_chk);
4091 __skb_push(skb_chk, offset); 4085 __skb_push(skb_chk, offset);
4092 4086
4093 if (ret) { 4087 if (ret)
4094 kfree_skb(skb_chk); 4088 goto err;
4095 return NULL;
4096 }
4097 4089
4098 return skb_chk; 4090 return skb_chk;
4091
4092err:
4093 if (skb_chk && skb_chk != skb)
4094 kfree_skb(skb_chk);
4095
4096 return NULL;
4097
4099} 4098}
4100EXPORT_SYMBOL(skb_checksum_trimmed); 4099EXPORT_SYMBOL(skb_checksum_trimmed);
4101 4100
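
The skb_checksum_maybe_trim()/skb_checksum_trimmed() hunks change the ownership rule: the helpers no longer consume the skb they are given, and the caller frees the returned buffer only when it differs from the one passed in. A small userspace sketch of that convention, with buf_trim_copy() as an illustrative stand-in:

    #include <stdlib.h>
    #include <string.h>

    struct buf { size_t len; char data[256]; };

    static struct buf *buf_trim_copy(struct buf *b, size_t want)
    {
        struct buf *copy;

        if (b->len < want)
            return NULL;        /* too short: error, nothing consumed */
        if (b->len == want)
            return b;           /* already the right size: return input */

        copy = malloc(sizeof(*copy));
        if (!copy)
            return NULL;
        memcpy(copy, b, sizeof(*copy));
        copy->len = want;       /* the copy is trimmed, the original is not */
        return copy;
    }

    static void caller(struct buf *b)
    {
        struct buf *t = buf_trim_copy(b, 128);

        if (!t)
            return;
        /* ... use t ... */
        if (t != b)
            free(t);            /* only free what the helper allocated */
    }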
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1497 sock_copy(newsk, sk); 1497 sock_copy(newsk, sk);
1498 1498
1499 /* SANITY */ 1499 /* SANITY */
1500 get_net(sock_net(newsk)); 1500 if (likely(newsk->sk_net_refcnt))
1501 get_net(sock_net(newsk));
1501 sk_node_init(&newsk->sk_node); 1502 sk_node_init(&newsk->sk_node);
1502 sock_lock_init(newsk); 1503 sock_lock_init(newsk);
1503 bh_lock_sock(newsk); 1504 bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
1967 * sk_wait_data - wait for data to arrive at sk_receive_queue 1968 * sk_wait_data - wait for data to arrive at sk_receive_queue
1968 * @sk: sock to wait on 1969 * @sk: sock to wait on
1969 * @timeo: for how long 1970 * @timeo: for how long
1971 * @skb: last skb seen on sk_receive_queue
1970 * 1972 *
1971 * Now socket state including sk->sk_err is changed only under lock, 1973 * Now socket state including sk->sk_err is changed only under lock,
1972 * hence we may omit checks after joining wait queue. 1974 * hence we may omit checks after joining wait queue.
1973 * We check receive queue before schedule() only as optimization; 1975 * We check receive queue before schedule() only as optimization;
1974 * it is very likely that release_sock() added new data. 1976 * it is very likely that release_sock() added new data.
1975 */ 1977 */
1976int sk_wait_data(struct sock *sk, long *timeo) 1978int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1977{ 1979{
1978 int rc; 1980 int rc;
1979 DEFINE_WAIT(wait); 1981 DEFINE_WAIT(wait);
1980 1982
1981 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1983 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1982 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1984 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1983 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1985 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
1984 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1986 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1985 finish_wait(sk_sleep(sk), &wait); 1987 finish_wait(sk_sleep(sk), &wait);
1986 return rc; 1988 return rc;
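
sk_wait_data() now takes the last skb the caller already saw and waits until the tail of sk_receive_queue is something else, so data that was already inspected cannot satisfy the wait. A pthreads sketch of waiting on "the tail changed" rather than "the queue is non-empty"; the queue type and names are illustrative:

    #include <pthread.h>

    struct item { struct item *next; };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;
    static struct item *q_tail;

    static void enqueue(struct item *it)
    {
        pthread_mutex_lock(&q_lock);
        it->next = NULL;
        /* ... link it after the old tail ... */
        q_tail = it;                       /* new tail: wakes the waiter */
        pthread_cond_broadcast(&q_cond);
        pthread_mutex_unlock(&q_lock);
    }

    static void wait_for_new_data(const struct item *last_seen)
    {
        pthread_mutex_lock(&q_lock);
        while (q_tail == last_seen)        /* nothing newer than what we saw */
            pthread_cond_wait(&q_cond, &q_lock);
        pthread_mutex_unlock(&q_lock);
    }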
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
886 break; 886 break;
887 } 887 }
888 888
889 sk_wait_data(sk, &timeo); 889 sk_wait_data(sk, &timeo, NULL);
890 continue; 890 continue;
891 found_ok_skb: 891 found_ok_skb:
892 if (len > skb->len) 892 if (len > skb->len)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 392e29a0227d..b445d492c115 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -630,7 +630,7 @@ static int dsa_of_probe(struct device *dev)
630 continue; 630 continue;
631 631
632 cd->sw_addr = be32_to_cpup(sw_addr); 632 cd->sw_addr = be32_to_cpup(sw_addr);
633 if (cd->sw_addr > PHY_MAX_ADDR) 633 if (cd->sw_addr >= PHY_MAX_ADDR)
634 continue; 634 continue;
635 635
636 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) 636 if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
@@ -642,6 +642,8 @@ static int dsa_of_probe(struct device *dev)
642 continue; 642 continue;
643 643
644 port_index = be32_to_cpup(port_reg); 644 port_index = be32_to_cpup(port_reg);
645 if (port_index >= DSA_MAX_PORTS)
646 break;
645 647
646 port_name = of_get_property(port, "label", NULL); 648 port_name = of_get_property(port, "label", NULL);
647 if (!port_name) 649 if (!port_name)
@@ -666,8 +668,6 @@ static int dsa_of_probe(struct device *dev)
666 goto out_free_chip; 668 goto out_free_chip;
667 } 669 }
668 670
669 if (port_index == DSA_MAX_PORTS)
670 break;
671 } 671 }
672 } 672 }
673 673
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123790ea..35c47ddd04f0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
756 return -ENODEV; 756 return -ENODEV;
757 757
758 /* Use already configured phy mode */ 758 /* Use already configured phy mode */
759 p->phy_interface = p->phy->interface; 759 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
760 p->phy_interface = p->phy->interface;
760 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 761 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
761 p->phy_interface); 762 p->phy_interface);
762 763
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
207 } else { 207 } else {
208 fq->q.meat += skb->len; 208 fq->q.meat += skb->len;
209 } 209 }
210 add_frag_mem_limit(&fq->q, skb->truesize); 210 add_frag_mem_limit(fq->q.net, skb->truesize);
211 211
212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
213 fq->q.meat == fq->q.len) { 213 fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
287 clone->data_len = clone->len; 287 clone->data_len = clone->len;
288 head->data_len -= clone->len; 288 head->data_len -= clone->len;
289 head->len -= clone->len; 289 head->len -= clone->len;
290 add_frag_mem_limit(&fq->q, clone->truesize); 290 add_frag_mem_limit(fq->q.net, clone->truesize);
291 } 291 }
292 292
293 WARN_ON(head == NULL); 293 WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
310 } 310 }
311 fp = next; 311 fp = next;
312 } 312 }
313 sub_frag_mem_limit(&fq->q, sum_truesize); 313 sub_frag_mem_limit(fq->q.net, sum_truesize);
314 314
315 head->next = NULL; 315 head->next = NULL;
316 head->dev = dev; 316 head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1017 1017
1018 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1018 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1019 if (neigh) { 1019 if (neigh) {
1020 read_lock_bh(&neigh->lock); 1020 if (!(neigh->nud_state & NUD_NOARP)) {
1021 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); 1021 read_lock_bh(&neigh->lock);
1022 r->arp_flags = arp_state_to_flags(neigh); 1022 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
1023 read_unlock_bh(&neigh->lock); 1023 r->arp_flags = arp_state_to_flags(neigh);
1024 r->arp_ha.sa_family = dev->type; 1024 read_unlock_bh(&neigh->lock);
1025 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); 1025 r->arp_ha.sa_family = dev->type;
1026 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
1027 err = 0;
1028 }
1026 neigh_release(neigh); 1029 neigh_release(neigh);
1027 err = 0;
1028 } 1030 }
1029 return err; 1031 return err;
1030} 1032}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 90c0e8386116..574fad9cca05 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21#include <net/tcp_states.h> 21#include <net/tcp_states.h>
22 22
23int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 23int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 24{
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
39 39
40 sk_dst_reset(sk); 40 sk_dst_reset(sk);
41 41
42 lock_sock(sk);
43
44 oif = sk->sk_bound_dev_if; 42 oif = sk->sk_bound_dev_if;
45 saddr = inet->inet_saddr; 43 saddr = inet->inet_saddr;
46 if (ipv4_is_multicast(usin->sin_addr.s_addr)) { 44 if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
82 sk_dst_set(sk, &rt->dst); 80 sk_dst_set(sk, &rt->dst);
83 err = 0; 81 err = 0;
84out: 82out:
85 release_sock(sk);
86 return err; 83 return err;
87} 84}
85EXPORT_SYMBOL(__ip4_datagram_connect);
86
87int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
88{
89 int res;
90
91 lock_sock(sk);
92 res = __ip4_datagram_connect(sk, uaddr, addr_len);
93 release_sock(sk);
94 return res;
95}
88EXPORT_SYMBOL(ip4_datagram_connect); 96EXPORT_SYMBOL(ip4_datagram_connect);
89 97
90/* Because UDP xmit path can manipulate sk_dst_cache without holding 98/* Because UDP xmit path can manipulate sk_dst_cache without holding
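
The ip4_datagram_connect() hunks split the function into a lockless __ip4_datagram_connect() plus a thin wrapper that takes the socket lock, so callers that already hold the lock can use the double-underscore variant. The usual shape of that split, sketched with a pthreads mutex and illustrative names:

    #include <pthread.h>

    static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must hold sock_lock. */
    static int __do_connect(int addr)
    {
        (void)addr;
        /* ... resolve route, update connection state; lock is held ... */
        return 0;
    }

    static int do_connect(int addr)
    {
        int res;

        pthread_mutex_lock(&sock_lock);
        res = __do_connect(addr);
        pthread_mutex_unlock(&sock_lock);
        return res;
    }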
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7498716e8f54..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
882 queue_delayed_work(system_power_efficient_wq, 882 queue_delayed_work(system_power_efficient_wq,
883 &check_lifetime_work, 0); 883 &check_lifetime_work, 0);
884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
885 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
886 } 885 }
887 return 0; 886 return 0;
888} 887}
@@ -1740,6 +1739,8 @@ static int inet_netconf_msgsize_devconf(int type)
1740 size += nla_total_size(4); 1739 size += nla_total_size(4);
1741 if (type == -1 || type == NETCONFA_PROXY_NEIGH) 1740 if (type == -1 || type == NETCONFA_PROXY_NEIGH)
1742 size += nla_total_size(4); 1741 size += nla_total_size(4);
1742 if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1743 size += nla_total_size(4);
1743 1744
1744 return size; 1745 return size;
1745} 1746}
@@ -1780,6 +1781,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1780 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, 1781 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1781 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0) 1782 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1782 goto nla_put_failure; 1783 goto nla_put_failure;
1784 if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1785 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1786 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1787 goto nla_put_failure;
1783 1788
1784 nlmsg_end(skb, nlh); 1789 nlmsg_end(skb, nlh);
1785 return 0; 1790 return 0;
@@ -1819,6 +1824,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1819 [NETCONFA_FORWARDING] = { .len = sizeof(int) }, 1824 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1820 [NETCONFA_RP_FILTER] = { .len = sizeof(int) }, 1825 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1821 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) }, 1826 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1827 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
1822}; 1828};
1823 1829
1824static int inet_netconf_get_devconf(struct sk_buff *in_skb, 1830static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -2048,6 +2054,12 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
2048 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH, 2054 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2049 ifindex, cnf); 2055 ifindex, cnf);
2050 } 2056 }
2057 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2058 new_value != old_value) {
2059 ifindex = devinet_conf_ifindex(net, cnf);
2060 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
2061 ifindex, cnf);
2062 }
2051 } 2063 }
2052 2064
2053 return ret; 2065 return ret;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
13 u8 fa_state; 13 u8 fa_state;
14 u8 fa_slen; 14 u8 fa_slen;
15 u32 tb_id; 15 u32 tb_id;
16 s16 fa_default;
16 struct rcu_head rcu; 17 struct rcu_head rcu;
17}; 18};
18 19
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
1202} 1202}
1203 1203
1204/* Must be invoked inside of an RCU protected region. */ 1204/* Must be invoked inside of an RCU protected region. */
1205void fib_select_default(struct fib_result *res) 1205void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
1206{ 1206{
1207 struct fib_info *fi = NULL, *last_resort = NULL; 1207 struct fib_info *fi = NULL, *last_resort = NULL;
1208 struct hlist_head *fa_head = res->fa_head; 1208 struct hlist_head *fa_head = res->fa_head;
1209 struct fib_table *tb = res->table; 1209 struct fib_table *tb = res->table;
1210 u8 slen = 32 - res->prefixlen;
1210 int order = -1, last_idx = -1; 1211 int order = -1, last_idx = -1;
1211 struct fib_alias *fa; 1212 struct fib_alias *fa, *fa1 = NULL;
1213 u32 last_prio = res->fi->fib_priority;
1214 u8 last_tos = 0;
1212 1215
1213 hlist_for_each_entry_rcu(fa, fa_head, fa_list) { 1216 hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
1214 struct fib_info *next_fi = fa->fa_info; 1217 struct fib_info *next_fi = fa->fa_info;
1215 1218
1219 if (fa->fa_slen != slen)
1220 continue;
1221 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1222 continue;
1223 if (fa->tb_id != tb->tb_id)
1224 continue;
1225 if (next_fi->fib_priority > last_prio &&
1226 fa->fa_tos == last_tos) {
1227 if (last_tos)
1228 continue;
1229 break;
1230 }
1231 if (next_fi->fib_flags & RTNH_F_DEAD)
1232 continue;
1233 last_tos = fa->fa_tos;
1234 last_prio = next_fi->fib_priority;
1235
1216 if (next_fi->fib_scope != res->scope || 1236 if (next_fi->fib_scope != res->scope ||
1217 fa->fa_type != RTN_UNICAST) 1237 fa->fa_type != RTN_UNICAST)
1218 continue; 1238 continue;
1219
1220 if (next_fi->fib_priority > res->fi->fib_priority)
1221 break;
1222 if (!next_fi->fib_nh[0].nh_gw || 1239 if (!next_fi->fib_nh[0].nh_gw ||
1223 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) 1240 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1224 continue; 1241 continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
1228 if (!fi) { 1245 if (!fi) {
1229 if (next_fi != res->fi) 1246 if (next_fi != res->fi)
1230 break; 1247 break;
1248 fa1 = fa;
1231 } else if (!fib_detect_death(fi, order, &last_resort, 1249 } else if (!fib_detect_death(fi, order, &last_resort,
1232 &last_idx, tb->tb_default)) { 1250 &last_idx, fa1->fa_default)) {
1233 fib_result_assign(res, fi); 1251 fib_result_assign(res, fi);
1234 tb->tb_default = order; 1252 fa1->fa_default = order;
1235 goto out; 1253 goto out;
1236 } 1254 }
1237 fi = next_fi; 1255 fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
1239 } 1257 }
1240 1258
1241 if (order <= 0 || !fi) { 1259 if (order <= 0 || !fi) {
1242 tb->tb_default = -1; 1260 if (fa1)
1261 fa1->fa_default = -1;
1243 goto out; 1262 goto out;
1244 } 1263 }
1245 1264
1246 if (!fib_detect_death(fi, order, &last_resort, &last_idx, 1265 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1247 tb->tb_default)) { 1266 fa1->fa_default)) {
1248 fib_result_assign(res, fi); 1267 fib_result_assign(res, fi);
1249 tb->tb_default = order; 1268 fa1->fa_default = order;
1250 goto out; 1269 goto out;
1251 } 1270 }
1252 1271
1253 if (last_idx >= 0) 1272 if (last_idx >= 0)
1254 fib_result_assign(res, last_resort); 1273 fib_result_assign(res, last_resort);
1255 tb->tb_default = last_idx; 1274 fa1->fa_default = last_idx;
1256out: 1275out:
1257 return; 1276 return;
1258} 1277}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..b0c6258ffb79 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1171 new_fa->fa_state = state & ~FA_S_ACCESSED; 1171 new_fa->fa_state = state & ~FA_S_ACCESSED;
1172 new_fa->fa_slen = fa->fa_slen; 1172 new_fa->fa_slen = fa->fa_slen;
1173 new_fa->tb_id = tb->tb_id; 1173 new_fa->tb_id = tb->tb_id;
1174 new_fa->fa_default = -1;
1174 1175
1175 err = switchdev_fib_ipv4_add(key, plen, fi, 1176 err = switchdev_fib_ipv4_add(key, plen, fi,
1176 new_fa->fa_tos, 1177 new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1222 new_fa->fa_state = 0; 1223 new_fa->fa_state = 0;
1223 new_fa->fa_slen = slen; 1224 new_fa->fa_slen = slen;
1224 new_fa->tb_id = tb->tb_id; 1225 new_fa->tb_id = tb->tb_id;
1226 new_fa->fa_default = -1;
1225 1227
1226 /* (Optionally) offload fib entry to switch hardware. */ 1228 /* (Optionally) offload fib entry to switch hardware. */
1227 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type, 1229 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
1791 if (hlist_empty(&n->leaf)) { 1793 if (hlist_empty(&n->leaf)) {
1792 put_child_root(pn, n->key, NULL); 1794 put_child_root(pn, n->key, NULL);
1793 node_free(n); 1795 node_free(n);
1794 } else {
1795 leaf_pull_suffix(pn, n);
1796 } 1796 }
1797 } 1797 }
1798} 1798}
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
1862 if (hlist_empty(&n->leaf)) { 1862 if (hlist_empty(&n->leaf)) {
1863 put_child_root(pn, n->key, NULL); 1863 put_child_root(pn, n->key, NULL);
1864 node_free(n); 1864 node_free(n);
1865 } else {
1866 leaf_pull_suffix(pn, n);
1867 } 1865 }
1868 } 1866 }
1869 1867
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
1990 return NULL; 1988 return NULL;
1991 1989
1992 tb->tb_id = id; 1990 tb->tb_id = id;
1993 tb->tb_default = -1;
1994 tb->tb_num_default = 0; 1991 tb->tb_num_default = 0;
1995 tb->tb_data = (alias ? alias->__data : tb->__data); 1992 tb->tb_data = (alias ? alias->__data : tb->__data);
1996 1993
@@ -2468,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2468 key = l->key + 1; 2465 key = l->key + 1;
2469 iter->pos++; 2466 iter->pos++;
2470 2467
2471 if (pos-- <= 0) 2468 if (--pos <= 0)
2472 break; 2469 break;
2473 2470
2474 l = NULL; 2471 l = NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf648ec4..9fdfd9deac11 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1435 struct sk_buff *skb_chk; 1435 struct sk_buff *skb_chk;
1436 unsigned int transport_len; 1436 unsigned int transport_len;
1437 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr); 1437 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
1438 int ret; 1438 int ret = -EINVAL;
1439 1439
1440 transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); 1440 transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
1441 1441
1442 skb_get(skb);
1443 skb_chk = skb_checksum_trimmed(skb, transport_len, 1442 skb_chk = skb_checksum_trimmed(skb, transport_len,
1444 ip_mc_validate_checksum); 1443 ip_mc_validate_checksum);
1445 if (!skb_chk) 1444 if (!skb_chk)
1446 return -EINVAL; 1445 goto err;
1447 1446
1448 if (!pskb_may_pull(skb_chk, len)) { 1447 if (!pskb_may_pull(skb_chk, len))
1449 kfree_skb(skb_chk); 1448 goto err;
1450 return -EINVAL;
1451 }
1452 1449
1453 ret = ip_mc_check_igmp_msg(skb_chk); 1450 ret = ip_mc_check_igmp_msg(skb_chk);
1454 if (ret) { 1451 if (ret)
1455 kfree_skb(skb_chk); 1452 goto err;
1456 return ret;
1457 }
1458 1453
1459 if (skb_trimmed) 1454 if (skb_trimmed)
1460 *skb_trimmed = skb_chk; 1455 *skb_trimmed = skb_chk;
1461 else 1456 /* free now unneeded clone */
1457 else if (skb_chk != skb)
1462 kfree_skb(skb_chk); 1458 kfree_skb(skb_chk);
1463 1459
1464 return 0; 1460 ret = 0;
1461
1462err:
1463 if (ret && skb_chk && skb_chk != skb)
1464 kfree_skb(skb_chk);
1465
1466 return ret;
1465} 1467}
1466 1468
1467/** 1469/**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1470 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional) 1472 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
1471 * 1473 *
1472 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets 1474 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
1473 * skb network and transport headers accordingly and returns zero. 1475 * skb transport header accordingly and returns zero.
1474 * 1476 *
1475 * -EINVAL: A broken packet was detected, i.e. it violates some internet 1477 * -EINVAL: A broken packet was detected, i.e. it violates some internet
1476 * standard 1478 * standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1485 * to leave the original skb and its full frame unchanged (which might be 1487 * to leave the original skb and its full frame unchanged (which might be
1486 * desirable for layer 2 frame jugglers). 1488 * desirable for layer 2 frame jugglers).
1487 * 1489 *
1488 * The caller needs to release a reference count from any returned skb_trimmed. 1490 * Caller needs to set the skb network header and free any returned skb if it
1491 * differs from the provided skb.
1489 */ 1492 */
1490int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) 1493int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1491{ 1494{
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0d9326..134957159c27 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
593 } 593 }
594 594
595 spin_unlock(&queue->syn_wait_lock); 595 spin_unlock(&queue->syn_wait_lock);
596 if (del_timer(&req->rsk_timer)) 596 if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
597 reqsk_put(req); 597 reqsk_put(req);
598 return found; 598 return found;
599} 599}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 9bc26677058e..c3b1f3a0f4cf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -152,8 +152,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
152 inet6_sk(sk)->tclass) < 0) 152 inet6_sk(sk)->tclass) < 0)
153 goto errout; 153 goto errout;
154 154
155 if (ipv6_only_sock(sk) && 155 if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
156 nla_put_u8(skb, INET_DIAG_SKV6ONLY, 1)) 156 nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
157 goto errout; 157 goto errout;
158 } 158 }
159#endif 159#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
131 unsigned int evicted = 0; 131 unsigned int evicted = 0;
132 HLIST_HEAD(expired); 132 HLIST_HEAD(expired);
133 133
134evict_again:
135 spin_lock(&hb->chain_lock); 134 spin_lock(&hb->chain_lock);
136 135
137 hlist_for_each_entry_safe(fq, n, &hb->chain, list) { 136 hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
138 if (!inet_fragq_should_evict(fq)) 137 if (!inet_fragq_should_evict(fq))
139 continue; 138 continue;
140 139
141 if (!del_timer(&fq->timer)) { 140 if (!del_timer(&fq->timer))
142 /* q expiring right now thus increment its refcount so 141 continue;
143 * it won't be freed under us and wait until the timer
144 * has finished executing then destroy it
145 */
146 atomic_inc(&fq->refcnt);
147 spin_unlock(&hb->chain_lock);
148 del_timer_sync(&fq->timer);
149 inet_frag_put(fq, f);
150 goto evict_again;
151 }
152 142
153 fq->flags |= INET_FRAG_EVICTED; 143 hlist_add_head(&fq->list_evictor, &expired);
154 hlist_del(&fq->list);
155 hlist_add_head(&fq->list, &expired);
156 ++evicted; 144 ++evicted;
157 } 145 }
158 146
159 spin_unlock(&hb->chain_lock); 147 spin_unlock(&hb->chain_lock);
160 148
161 hlist_for_each_entry_safe(fq, n, &expired, list) 149 hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
162 f->frag_expire((unsigned long) fq); 150 f->frag_expire((unsigned long) fq);
163 151
164 return evicted; 152 return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
240 int i; 228 int i;
241 229
242 nf->low_thresh = 0; 230 nf->low_thresh = 0;
243 local_bh_disable();
244 231
245evict_again: 232evict_again:
233 local_bh_disable();
246 seq = read_seqbegin(&f->rnd_seqlock); 234 seq = read_seqbegin(&f->rnd_seqlock);
247 235
248 for (i = 0; i < INETFRAGS_HASHSZ ; i++) 236 for (i = 0; i < INETFRAGS_HASHSZ ; i++)
249 inet_evict_bucket(f, &f->hash[i]); 237 inet_evict_bucket(f, &f->hash[i]);
250 238
251 if (read_seqretry(&f->rnd_seqlock, seq))
252 goto evict_again;
253
254 local_bh_enable(); 239 local_bh_enable();
240 cond_resched();
241
242 if (read_seqretry(&f->rnd_seqlock, seq) ||
243 percpu_counter_sum(&nf->mem))
244 goto evict_again;
255 245
256 percpu_counter_destroy(&nf->mem); 246 percpu_counter_destroy(&nf->mem);
257} 247}
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
284 struct inet_frag_bucket *hb; 274 struct inet_frag_bucket *hb;
285 275
286 hb = get_frag_bucket_locked(fq, f); 276 hb = get_frag_bucket_locked(fq, f);
287 if (!(fq->flags & INET_FRAG_EVICTED)) 277 hlist_del(&fq->list);
288 hlist_del(&fq->list); 278 fq->flags |= INET_FRAG_COMPLETE;
289 spin_unlock(&hb->chain_lock); 279 spin_unlock(&hb->chain_lock);
290} 280}
291 281
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
297 if (!(fq->flags & INET_FRAG_COMPLETE)) { 287 if (!(fq->flags & INET_FRAG_COMPLETE)) {
298 fq_unlink(fq, f); 288 fq_unlink(fq, f);
299 atomic_dec(&fq->refcnt); 289 atomic_dec(&fq->refcnt);
300 fq->flags |= INET_FRAG_COMPLETE;
301 } 290 }
302} 291}
303EXPORT_SYMBOL(inet_frag_kill); 292EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
330 fp = xp; 319 fp = xp;
331 } 320 }
332 sum = sum_truesize + f->qsize; 321 sum = sum_truesize + f->qsize;
333 sub_frag_mem_limit(q, sum);
334 322
335 if (f->destructor) 323 if (f->destructor)
336 f->destructor(q); 324 f->destructor(q);
337 kmem_cache_free(f->frags_cachep, q); 325 kmem_cache_free(f->frags_cachep, q);
326
327 sub_frag_mem_limit(nf, sum);
338} 328}
339EXPORT_SYMBOL(inet_frag_destroy); 329EXPORT_SYMBOL(inet_frag_destroy);
340 330
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
390 380
391 q->net = nf; 381 q->net = nf;
392 f->constructor(q, arg); 382 f->constructor(q, arg);
393 add_frag_mem_limit(q, f->qsize); 383 add_frag_mem_limit(nf, f->qsize);
394 384
395 setup_timer(&q->timer, f->frag_expire, (unsigned long)q); 385 setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
396 spin_lock_init(&q->lock); 386 spin_lock_init(&q->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 5f9b063bbe8a..0cb9165421d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
624 624
625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
626{ 626{
627 unsigned int locksz = sizeof(spinlock_t);
627 unsigned int i, nblocks = 1; 628 unsigned int i, nblocks = 1;
628 629
629 if (sizeof(spinlock_t) != 0) { 630 if (locksz != 0) {
630 /* allocate 2 cache lines or at least one spinlock per cpu */ 631 /* allocate 2 cache lines or at least one spinlock per cpu */
631 nblocks = max_t(unsigned int, 632 nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
632 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
633 1);
634 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 633 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
635 634
636 /* no more locks than number of hash buckets */ 635 /* no more locks than number of hash buckets */
637 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 636 nblocks = min(nblocks, hashinfo->ehash_mask + 1);
638 637
639 hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), 638 hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
640 GFP_KERNEL | __GFP_NOWARN); 639 GFP_KERNEL | __GFP_NOWARN);
641 if (!hashinfo->ehash_locks) 640 if (!hashinfo->ehash_locks)
642 hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); 641 hashinfo->ehash_locks = vmalloc(nblocks * locksz);
643 642
644 if (!hashinfo->ehash_locks) 643 if (!hashinfo->ehash_locks)
645 return -ENOMEM; 644 return -ENOMEM;
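
inet_ehash_locks_alloc() above sizes its spinlock array as roughly two cache lines worth of locks per possible CPU, rounded up to a power of two and capped at the number of hash buckets, before trying kmalloc_array() and falling back to vmalloc(). A userspace sketch of just the sizing arithmetic; CACHE_LINE and the cpu count are illustrative inputs:

    #include <stddef.h>

    #define CACHE_LINE 64

    static size_t roundup_pow2(size_t n)
    {
        size_t r = 1;

        while (r < n)
            r <<= 1;
        return r;
    }

    static size_t ehash_nlocks(size_t locksz, size_t ncpus, size_t nbuckets)
    {
        size_t nblocks;

        if (locksz == 0)
            return 1;               /* degenerate case: a single lock */

        /* two cache lines worth of locks, at least one, per CPU */
        nblocks = 2 * CACHE_LINE / locksz;
        if (nblocks < 1)
            nblocks = 1;
        nblocks = roundup_pow2(nblocks * ncpus);

        if (nblocks > nbuckets)
            nblocks = nbuckets;     /* never more locks than hash buckets */
        return nblocks;
    }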
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a50dc6d408d1..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
202 ipq_kill(qp); 202 ipq_kill(qp);
203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
204 204
205 if (!(qp->q.flags & INET_FRAG_EVICTED)) { 205 if (!inet_frag_evicting(&qp->q)) {
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 const struct iphdr *iph; 207 const struct iphdr *iph;
208 int err; 208 int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
309 kfree_skb(fp); 309 kfree_skb(fp);
310 fp = xp; 310 fp = xp;
311 } while (fp); 311 } while (fp);
312 sub_frag_mem_limit(&qp->q, sum_truesize); 312 sub_frag_mem_limit(qp->q.net, sum_truesize);
313 313
314 qp->q.flags = 0; 314 qp->q.flags = 0;
315 qp->q.len = 0; 315 qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
351 ihl = ip_hdrlen(skb); 351 ihl = ip_hdrlen(skb);
352 352
353 /* Determine the position of this fragment. */ 353 /* Determine the position of this fragment. */
354 end = offset + skb->len - ihl; 354 end = offset + skb->len - skb_network_offset(skb) - ihl;
355 err = -EINVAL; 355 err = -EINVAL;
356 356
357 /* Is this the final fragment? */ 357 /* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
381 goto err; 381 goto err;
382 382
383 err = -ENOMEM; 383 err = -ENOMEM;
384 if (!pskb_pull(skb, ihl)) 384 if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
385 goto err; 385 goto err;
386 386
387 err = pskb_trim_rcsum(skb, end - offset); 387 err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
455 qp->q.fragments = next; 455 qp->q.fragments = next;
456 456
457 qp->q.meat -= free_it->len; 457 qp->q.meat -= free_it->len;
458 sub_frag_mem_limit(&qp->q, free_it->truesize); 458 sub_frag_mem_limit(qp->q.net, free_it->truesize);
459 kfree_skb(free_it); 459 kfree_skb(free_it);
460 } 460 }
461 } 461 }
@@ -479,7 +479,7 @@ found:
479 qp->q.stamp = skb->tstamp; 479 qp->q.stamp = skb->tstamp;
480 qp->q.meat += skb->len; 480 qp->q.meat += skb->len;
481 qp->ecn |= ecn; 481 qp->ecn |= ecn;
482 add_frag_mem_limit(&qp->q, skb->truesize); 482 add_frag_mem_limit(qp->q.net, skb->truesize);
483 if (offset == 0) 483 if (offset == 0)
484 qp->q.flags |= INET_FRAG_FIRST_IN; 484 qp->q.flags |= INET_FRAG_FIRST_IN;
485 485
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
587 head->len -= clone->len; 587 head->len -= clone->len;
588 clone->csum = 0; 588 clone->csum = 0;
589 clone->ip_summed = head->ip_summed; 589 clone->ip_summed = head->ip_summed;
590 add_frag_mem_limit(&qp->q, clone->truesize); 590 add_frag_mem_limit(qp->q.net, clone->truesize);
591 } 591 }
592 592
593 skb_push(head, head->data - skb_network_header(head)); 593 skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
615 } 615 }
616 fp = next; 616 fp = next;
617 } 617 }
618 sub_frag_mem_limit(&qp->q, sum_truesize); 618 sub_frag_mem_limit(qp->q.net, sum_truesize);
619 619
620 head->next = NULL; 620 head->next = NULL;
621 head->dev = dev; 621 head->dev = dev;
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
641 iph->frag_off = 0; 641 iph->frag_off = 0;
642 } 642 }
643 643
644 ip_send_check(iph);
645
644 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 646 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
645 qp->q.fragments = NULL; 647 qp->q.fragments = NULL;
646 qp->q.fragments_tail = NULL; 648 qp->q.fragments_tail = NULL;
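The added ip_send_check() call refreshes the IPv4 header checksum after reassembly has rewritten tot_len and frag_off; without it the merged packet would keep the checksum computed for the first fragment. For reference, the standard ones'-complement header checksum that call recomputes can be sketched outside the kernel as follows (function name is illustrative):

#include <stdint.h>
#include <stddef.h>

/*
 * RFC 1071 ones'-complement sum over an IPv4 header. hdr_len is the header
 * length in bytes (a multiple of 4); the checksum field (bytes 10-11) must
 * be zeroed before calling. The caller writes the returned value back as
 * two bytes, high byte first.
 */
uint16_t ipv4_header_checksum(const uint8_t *hdr, size_t hdr_len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < hdr_len; i += 2)
		sum += (uint32_t)((hdr[i] << 8) | hdr[i + 1]);

	while (sum >> 16)                  /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}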
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 4c2c3ba4ba65..626d9e56a6bd 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
586EXPORT_SYMBOL(ip_tunnel_encap); 586EXPORT_SYMBOL(ip_tunnel_encap);
587 587
588static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, 588static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
589 struct rtable *rt, __be16 df) 589 struct rtable *rt, __be16 df,
590 const struct iphdr *inner_iph)
590{ 591{
591 struct ip_tunnel *tunnel = netdev_priv(dev); 592 struct ip_tunnel *tunnel = netdev_priv(dev);
592 int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; 593 int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
603 604
604 if (skb->protocol == htons(ETH_P_IP)) { 605 if (skb->protocol == htons(ETH_P_IP)) {
605 if (!skb_is_gso(skb) && 606 if (!skb_is_gso(skb) &&
606 (df & htons(IP_DF)) && mtu < pkt_size) { 607 (inner_iph->frag_off & htons(IP_DF)) &&
608 mtu < pkt_size) {
607 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); 609 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
608 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 610 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
609 return -E2BIG; 611 return -E2BIG;
@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
737 goto tx_error; 739 goto tx_error;
738 } 740 }
739 741
740 if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { 742 if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
741 ip_rt_put(rt); 743 ip_rt_put(rt);
742 goto tx_error; 744 goto tx_error;
743 } 745 }
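tnl_update_pmtu() now tests the DF bit of the inner IPv4 header instead of the tunnel's configured frag_off, so a "fragmentation needed" ICMP is only generated when the encapsulated flow actually set DF. A minimal sketch of that decision, working in host byte order for clarity (the kernel compares htons(IP_DF) against the raw network-order field; the function name is invented):

#include <stdbool.h>
#include <stdint.h>

#define IP_DF_FLAG 0x4000	/* "don't fragment" bit of frag_off, host order */

/*
 * Decide whether an oversized, non-GSO inner packet should trigger an
 * ICMP "fragmentation needed" error: only when the inner header set DF.
 */
bool should_send_frag_needed(uint16_t inner_frag_off, unsigned int pkt_size,
			     unsigned int mtu)
{
	return (inner_frag_off & IP_DF_FLAG) && mtu < pkt_size;
}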
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 95c9b6eece25..92305a1a021a 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -254,9 +254,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
254 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); 254 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
255 unsigned int verdict = NF_DROP; 255 unsigned int verdict = NF_DROP;
256 const struct arphdr *arp; 256 const struct arphdr *arp;
257 struct arpt_entry *e, *back; 257 struct arpt_entry *e, **jumpstack;
258 const char *indev, *outdev; 258 const char *indev, *outdev;
259 const void *table_base; 259 const void *table_base;
260 unsigned int cpu, stackidx = 0;
260 const struct xt_table_info *private; 261 const struct xt_table_info *private;
261 struct xt_action_param acpar; 262 struct xt_action_param acpar;
262 unsigned int addend; 263 unsigned int addend;
@@ -270,15 +271,16 @@ unsigned int arpt_do_table(struct sk_buff *skb,
270 local_bh_disable(); 271 local_bh_disable();
271 addend = xt_write_recseq_begin(); 272 addend = xt_write_recseq_begin();
272 private = table->private; 273 private = table->private;
274 cpu = smp_processor_id();
273 /* 275 /*
274 * Ensure we load private-> members after we've fetched the base 276 * Ensure we load private-> members after we've fetched the base
275 * pointer. 277 * pointer.
276 */ 278 */
277 smp_read_barrier_depends(); 279 smp_read_barrier_depends();
278 table_base = private->entries; 280 table_base = private->entries;
281 jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
279 282
280 e = get_entry(table_base, private->hook_entry[hook]); 283 e = get_entry(table_base, private->hook_entry[hook]);
281 back = get_entry(table_base, private->underflow[hook]);
282 284
283 acpar.in = state->in; 285 acpar.in = state->in;
284 acpar.out = state->out; 286 acpar.out = state->out;
@@ -312,18 +314,23 @@ unsigned int arpt_do_table(struct sk_buff *skb,
312 verdict = (unsigned int)(-v) - 1; 314 verdict = (unsigned int)(-v) - 1;
313 break; 315 break;
314 } 316 }
315 e = back; 317 if (stackidx == 0) {
316 back = get_entry(table_base, back->comefrom); 318 e = get_entry(table_base,
319 private->underflow[hook]);
320 } else {
321 e = jumpstack[--stackidx];
322 e = arpt_next_entry(e);
323 }
317 continue; 324 continue;
318 } 325 }
319 if (table_base + v 326 if (table_base + v
320 != arpt_next_entry(e)) { 327 != arpt_next_entry(e)) {
321 /* Save old back ptr in next entry */
322 struct arpt_entry *next = arpt_next_entry(e);
323 next->comefrom = (void *)back - table_base;
324 328
325 /* set back pointer to next entry */ 329 if (stackidx >= private->stacksize) {
326 back = next; 330 verdict = NF_DROP;
331 break;
332 }
333 jumpstack[stackidx++] = e;
327 } 334 }
328 335
329 e = get_entry(table_base, v); 336 e = get_entry(table_base, v);
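The arptables hunk replaces the old comefrom back-pointer scheme with a per-CPU jump stack: a jump to another chain pushes the current rule, RETURN pops it (falling back to the hook's underflow entry when the stack is empty), and running out of stack drops the packet. A simplified userspace model of that control flow; the rule encoding and verdict values here are invented for the sketch:

#include <stdio.h>

#define STACKSIZE 4
#define V_ACCEPT  -1
#define V_DROP    -2
#define V_RETURN  -3

struct rule {
	int verdict;	/* V_* above, or the index of a chain to jump to */
};

/* Walk the rules starting at 'entry'; jumps push the calling rule. */
static int run_chain(const struct rule *rules, int entry, int underflow)
{
	int stack[STACKSIZE];
	int depth = 0;
	int i = entry;

	for (;;) {
		int v = rules[i].verdict;

		if (v == V_ACCEPT || v == V_DROP)
			return v;
		if (v == V_RETURN) {
			if (depth == 0)
				i = underflow;		/* base chain policy */
			else
				i = stack[--depth] + 1;	/* resume after the jump */
			continue;
		}
		if (depth >= STACKSIZE)
			return V_DROP;			/* stack overflow: drop */
		stack[depth++] = i;			/* remember where to return */
		i = v;					/* jump into the other chain */
	}
}

int main(void)
{
	/* Rules 0-1 form the base chain, rules 2-3 a user chain:
	 * rule 0 jumps to rule 2, rule 2 returns, rule 1 accepts. */
	struct rule rules[] = { { 2 }, { V_ACCEPT }, { V_RETURN }, { V_DROP } };

	printf("%d\n", run_chain(rules, 0, 1));	/* prints -1 (accept) */
	return 0;
}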
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc183411e..95ea633e8356 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
226 226
227 synproxy_build_options(nth, opts); 227 synproxy_build_options(nth, opts);
228 228
229 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 229 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
230 niph, nth, tcp_hdr_size);
230} 231}
231 232
232static bool 233static bool
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2176 if (!res.prefixlen && 2176 if (!res.prefixlen &&
2177 res.table->tb_num_default > 1 && 2177 res.table->tb_num_default > 1 &&
2178 res.type == RTN_UNICAST && !fl4->flowi4_oif) 2178 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2179 fib_select_default(&res); 2179 fib_select_default(fl4, &res);
2180 2180
2181 if (!fl4->saddr) 2181 if (!fl4->saddr)
2182 fl4->saddr = FIB_RES_PREFSRC(net, res); 2182 fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231ccfb17..0330ab2e2b63 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
41static int tcp_syn_retries_max = MAX_TCP_SYNCNT; 41static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
42static int ip_ping_group_range_min[] = { 0, 0 }; 42static int ip_ping_group_range_min[] = { 0, 0 };
43static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; 43static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
44static int min_sndbuf = SOCK_MIN_SNDBUF;
45static int min_rcvbuf = SOCK_MIN_RCVBUF;
46 44
47/* Update system visible IP port range */ 45/* Update system visible IP port range */
48static void set_local_port_range(struct net *net, int range[2]) 46static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
530 .maxlen = sizeof(sysctl_tcp_wmem), 528 .maxlen = sizeof(sysctl_tcp_wmem),
531 .mode = 0644, 529 .mode = 0644,
532 .proc_handler = proc_dointvec_minmax, 530 .proc_handler = proc_dointvec_minmax,
533 .extra1 = &min_sndbuf, 531 .extra1 = &one,
534 }, 532 },
535 { 533 {
536 .procname = "tcp_notsent_lowat", 534 .procname = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
545 .maxlen = sizeof(sysctl_tcp_rmem), 543 .maxlen = sizeof(sysctl_tcp_rmem),
546 .mode = 0644, 544 .mode = 0644,
547 .proc_handler = proc_dointvec_minmax, 545 .proc_handler = proc_dointvec_minmax,
548 .extra1 = &min_rcvbuf, 546 .extra1 = &one,
549 }, 547 },
550 { 548 {
551 .procname = "tcp_app_win", 549 .procname = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
758 .maxlen = sizeof(sysctl_udp_rmem_min), 756 .maxlen = sizeof(sysctl_udp_rmem_min),
759 .mode = 0644, 757 .mode = 0644,
760 .proc_handler = proc_dointvec_minmax, 758 .proc_handler = proc_dointvec_minmax,
761 .extra1 = &min_rcvbuf, 759 .extra1 = &one
762 }, 760 },
763 { 761 {
764 .procname = "udp_wmem_min", 762 .procname = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
766 .maxlen = sizeof(sysctl_udp_wmem_min), 764 .maxlen = sizeof(sysctl_udp_wmem_min),
767 .mode = 0644, 765 .mode = 0644,
768 .proc_handler = proc_dointvec_minmax, 766 .proc_handler = proc_dointvec_minmax,
769 .extra1 = &min_sndbuf, 767 .extra1 = &one
770 }, 768 },
771 { } 769 { }
772}; 770};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
780 ret = -EAGAIN; 780 ret = -EAGAIN;
781 break; 781 break;
782 } 782 }
783 sk_wait_data(sk, &timeo); 783 sk_wait_data(sk, &timeo, NULL);
784 if (signal_pending(current)) { 784 if (signal_pending(current)) {
785 ret = sock_intr_errno(timeo); 785 ret = sock_intr_errno(timeo);
786 break; 786 break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1575 int target; /* Read at least this many bytes */ 1575 int target; /* Read at least this many bytes */
1576 long timeo; 1576 long timeo;
1577 struct task_struct *user_recv = NULL; 1577 struct task_struct *user_recv = NULL;
1578 struct sk_buff *skb; 1578 struct sk_buff *skb, *last;
1579 u32 urg_hole = 0; 1579 u32 urg_hole = 0;
1580 1580
1581 if (unlikely(flags & MSG_ERRQUEUE)) 1581 if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1635 1635
1636 /* Next get a buffer. */ 1636 /* Next get a buffer. */
1637 1637
1638 last = skb_peek_tail(&sk->sk_receive_queue);
1638 skb_queue_walk(&sk->sk_receive_queue, skb) { 1639 skb_queue_walk(&sk->sk_receive_queue, skb) {
1640 last = skb;
1639 /* Now that we have two receive queues this 1641 /* Now that we have two receive queues this
1640 * shouldn't happen. 1642 * shouldn't happen.
1641 */ 1643 */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1754 /* Do not sleep, just process backlog. */ 1756 /* Do not sleep, just process backlog. */
1755 release_sock(sk); 1757 release_sock(sk);
1756 lock_sock(sk); 1758 lock_sock(sk);
1757 } else 1759 } else {
1758 sk_wait_data(sk, &timeo); 1760 sk_wait_data(sk, &timeo, last);
1761 }
1759 1762
1760 if (user_recv) { 1763 if (user_recv) {
1761 int chunk; 1764 int chunk;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 684f095d196e..728f5b3d3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
1917 const struct inet_connection_sock *icsk = inet_csk(sk); 1917 const struct inet_connection_sock *icsk = inet_csk(sk);
1918 struct tcp_sock *tp = tcp_sk(sk); 1918 struct tcp_sock *tp = tcp_sk(sk);
1919 struct sk_buff *skb; 1919 struct sk_buff *skb;
1920 bool new_recovery = false; 1920 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1921 bool is_reneg; /* is receiver reneging on SACKs? */ 1921 bool is_reneg; /* is receiver reneging on SACKs? */
1922 1922
1923 /* Reduce ssthresh if it has not yet been made inside this window. */ 1923 /* Reduce ssthresh if it has not yet been made inside this window. */
1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1925 !after(tp->high_seq, tp->snd_una) || 1925 !after(tp->high_seq, tp->snd_una) ||
1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1927 new_recovery = true;
1928 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1927 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1929 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1928 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1930 tcp_ca_event(sk, CA_EVENT_LOSS); 1929 tcp_ca_event(sk, CA_EVENT_LOSS);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b79cf2..0ea2e1c5d395 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); 1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1349 if (req) { 1349 if (req) {
1350 nsk = tcp_check_req(sk, skb, req, false); 1350 nsk = tcp_check_req(sk, skb, req, false);
1351 if (!nsk) 1351 if (!nsk || nsk == sk)
1352 reqsk_put(req); 1352 reqsk_put(req);
1353 return nsk; 1353 return nsk;
1354 } 1354 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604f9273..1b8c5ba7d5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
1995 1995
1996 skb->sk = sk; 1996 skb->sk = sk;
1997 skb->destructor = sock_efree; 1997 skb->destructor = sock_efree;
1998 dst = sk->sk_rx_dst; 1998 dst = READ_ONCE(sk->sk_rx_dst);
1999 1999
2000 if (dst) 2000 if (dst)
2001 dst = dst_check(dst, 0); 2001 dst = dst_check(dst, 0);
2002 if (dst) 2002 if (dst) {
2003 skb_dst_set_noref(skb, dst); 2003 /* DST_NOCACHE can not be used without taking a reference */
2004 if (dst->flags & DST_NOCACHE) {
2005 if (likely(atomic_inc_not_zero(&dst->__refcnt)))
2006 skb_dst_set(skb, dst);
2007 } else {
2008 skb_dst_set_noref(skb, dst);
2009 }
2010 }
2004} 2011}
2005 2012
2006int udp_rcv(struct sk_buff *skb) 2013int udp_rcv(struct sk_buff *skb)
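The early-demux change only attaches a DST_NOCACHE route after successfully taking a reference with atomic_inc_not_zero(), which refuses to revive a refcount that has already dropped to zero. The same primitive, sketched in portable C11 atomics rather than the kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still alive (refcount != 0). */
static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
		/* CAS failed: 'old' was reloaded, loop and retry */
	}
	return false;
}

int main(void)
{
	atomic_int refcnt = 1;

	printf("live object: %d\n", inc_not_zero(&refcnt));	/* prints 1 */
	atomic_store(&refcnt, 0);
	printf("dying object: %d\n", inc_not_zero(&refcnt));	/* prints 0 */
	return 0;
}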
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 62d908e64eeb..b10a88986a98 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
41} 41}
42 42
43int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 43static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
44{ 44{
45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
46 struct inet_sock *inet = inet_sk(sk); 46 struct inet_sock *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
56 if (usin->sin6_family == AF_INET) { 56 if (usin->sin6_family == AF_INET) {
57 if (__ipv6_only_sock(sk)) 57 if (__ipv6_only_sock(sk))
58 return -EAFNOSUPPORT; 58 return -EAFNOSUPPORT;
59 err = ip4_datagram_connect(sk, uaddr, addr_len); 59 err = __ip4_datagram_connect(sk, uaddr, addr_len);
60 goto ipv4_connected; 60 goto ipv4_connected;
61 } 61 }
62 62
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 98 sin.sin_addr.s_addr = daddr->s6_addr32[3];
99 sin.sin_port = usin->sin6_port; 99 sin.sin_port = usin->sin6_port;
100 100
101 err = ip4_datagram_connect(sk, 101 err = __ip4_datagram_connect(sk,
102 (struct sockaddr *) &sin, 102 (struct sockaddr *) &sin,
103 sizeof(sin)); 103 sizeof(sin));
104 104
105ipv4_connected: 105ipv4_connected:
106 if (err) 106 if (err)
@@ -204,6 +204,16 @@ out:
204 fl6_sock_release(flowlabel); 204 fl6_sock_release(flowlabel);
205 return err; 205 return err;
206} 206}
207
208int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
209{
210 int res;
211
212 lock_sock(sk);
213 res = __ip6_datagram_connect(sk, uaddr, addr_len);
214 release_sock(sk);
215 return res;
216}
207EXPORT_SYMBOL_GPL(ip6_datagram_connect); 217EXPORT_SYMBOL_GPL(ip6_datagram_connect);
208 218
209int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, 219int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
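ip6_datagram_connect() is reduced to a wrapper that takes the socket lock around __ip6_datagram_connect(), and the IPv4 fallback switches to the unlocked __ip4_datagram_connect() since the caller now already holds the lock. The shape of that wrapper, modeled here with an ordinary mutex standing in for lock_sock()/release_sock(); all names are illustrative:

#include <pthread.h>

struct sock_like {
	pthread_mutex_t lock;
	int connected;		/* stand-in for real connection state */
};

/* Does the actual work; the caller must already hold s->lock. */
static int do_connect_locked(struct sock_like *s)
{
	s->connected = 1;
	return 0;
}

/* Public entry point: take the lock, call the unlocked helper, release. */
int do_connect(struct sock_like *s)
{
	int res;

	pthread_mutex_lock(&s->lock);
	res = do_connect_locked(s);
	pthread_mutex_unlock(&s->lock);
	return res;
}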
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d19861ab20..548c6237b1e7 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
172 *ppcpu_rt = NULL; 172 *ppcpu_rt = NULL;
173 } 173 }
174 } 174 }
175
176 non_pcpu_rt->rt6i_pcpu = NULL;
175} 177}
176 178
177static void rt6_release(struct rt6_info *rt) 179static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index f2e464eba5ef..57990c929cd8 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
331 if (offset < 0) 331 if (offset < 0)
332 goto out; 332 goto out;
333 333
334 if (!ipv6_is_mld(skb, nexthdr, offset)) 334 if (ipv6_is_mld(skb, nexthdr, offset))
335 goto out; 335 deliver = true;
336 336
337 deliver = true; 337 goto out;
338 } 338 }
339 /* unknown RA - process it normally */ 339 /* unknown RA - process it normally */
340 } 340 }
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e893cd18612f..08b62047c67f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
292static const struct net_offload sit_offload = { 292static const struct net_offload sit_offload = {
293 .callbacks = { 293 .callbacks = {
294 .gso_segment = ipv6_gso_segment, 294 .gso_segment = ipv6_gso_segment,
295 .gro_receive = ipv6_gro_receive,
296 .gro_complete = ipv6_gro_complete,
297 }, 295 },
298}; 296};
299 297
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5ab31e..9405b04eecc6 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
143 struct sk_buff *skb_chk = NULL; 143 struct sk_buff *skb_chk = NULL;
144 unsigned int transport_len; 144 unsigned int transport_len;
145 unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg); 145 unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
146 int ret; 146 int ret = -EINVAL;
147 147
148 transport_len = ntohs(ipv6_hdr(skb)->payload_len); 148 transport_len = ntohs(ipv6_hdr(skb)->payload_len);
149 transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr); 149 transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
150 150
151 skb_get(skb);
152 skb_chk = skb_checksum_trimmed(skb, transport_len, 151 skb_chk = skb_checksum_trimmed(skb, transport_len,
153 ipv6_mc_validate_checksum); 152 ipv6_mc_validate_checksum);
154 if (!skb_chk) 153 if (!skb_chk)
155 return -EINVAL; 154 goto err;
156 155
157 if (!pskb_may_pull(skb_chk, len)) { 156 if (!pskb_may_pull(skb_chk, len))
158 kfree_skb(skb_chk); 157 goto err;
159 return -EINVAL;
160 }
161 158
162 ret = ipv6_mc_check_mld_msg(skb_chk); 159 ret = ipv6_mc_check_mld_msg(skb_chk);
163 if (ret) { 160 if (ret)
164 kfree_skb(skb_chk); 161 goto err;
165 return ret;
166 }
167 162
168 if (skb_trimmed) 163 if (skb_trimmed)
169 *skb_trimmed = skb_chk; 164 *skb_trimmed = skb_chk;
170 else 165 /* free now unneeded clone */
166 else if (skb_chk != skb)
171 kfree_skb(skb_chk); 167 kfree_skb(skb_chk);
172 168
173 return 0; 169 ret = 0;
170
171err:
172 if (ret && skb_chk && skb_chk != skb)
173 kfree_skb(skb_chk);
174
175 return ret;
174} 176}
175 177
176/** 178/**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
179 * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional) 181 * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
180 * 182 *
181 * Checks whether an IPv6 packet is a valid MLD packet. If so sets 183 * Checks whether an IPv6 packet is a valid MLD packet. If so sets
182 * skb network and transport headers accordingly and returns zero. 184 * skb transport header accordingly and returns zero.
183 * 185 *
184 * -EINVAL: A broken packet was detected, i.e. it violates some internet 186 * -EINVAL: A broken packet was detected, i.e. it violates some internet
185 * standard 187 * standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
194 * to leave the original skb and its full frame unchanged (which might be 196 * to leave the original skb and its full frame unchanged (which might be
195 * desirable for layer 2 frame jugglers). 197 * desirable for layer 2 frame jugglers).
196 * 198 *
197 * The caller needs to release a reference count from any returned skb_trimmed. 199 * Caller needs to set the skb network header and free any returned skb if it
200 * differs from the provided skb.
198 */ 201 */
199int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed) 202int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
200{ 203{
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1651{ 1651{
1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1653 struct netdev_notifier_change_info *change_info;
1653 struct net *net = dev_net(dev); 1654 struct net *net = dev_net(dev);
1654 struct inet6_dev *idev; 1655 struct inet6_dev *idev;
1655 1656
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1664 ndisc_send_unsol_na(dev); 1665 ndisc_send_unsol_na(dev);
1665 in6_dev_put(idev); 1666 in6_dev_put(idev);
1666 break; 1667 break;
1668 case NETDEV_CHANGE:
1669 change_info = ptr;
1670 if (change_info->flags_changed & IFF_NOARP)
1671 neigh_changeaddr(&nd_tbl, dev);
1672 break;
1667 case NETDEV_DOWN: 1673 case NETDEV_DOWN:
1668 neigh_ifdown(&nd_tbl, dev); 1674 neigh_ifdown(&nd_tbl, dev);
1669 fib6_run_gc(0, net, false); 1675 fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b106de7..ebbb754c2111 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
37} 37}
38 38
39static void 39static void
40synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, 40synproxy_send_tcp(const struct synproxy_net *snet,
41 const struct sk_buff *skb, struct sk_buff *nskb,
41 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, 42 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
42 struct ipv6hdr *niph, struct tcphdr *nth, 43 struct ipv6hdr *niph, struct tcphdr *nth,
43 unsigned int tcp_hdr_size) 44 unsigned int tcp_hdr_size)
44{ 45{
45 struct net *net = nf_ct_net((struct nf_conn *)nfct); 46 struct net *net = nf_ct_net(snet->tmpl);
46 struct dst_entry *dst; 47 struct dst_entry *dst;
47 struct flowi6 fl6; 48 struct flowi6 fl6;
48 49
@@ -83,7 +84,8 @@ free_nskb:
83} 84}
84 85
85static void 86static void
86synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, 87synproxy_send_client_synack(const struct synproxy_net *snet,
88 const struct sk_buff *skb, const struct tcphdr *th,
87 const struct synproxy_options *opts) 89 const struct synproxy_options *opts)
88{ 90{
89 struct sk_buff *nskb; 91 struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
119 121
120 synproxy_build_options(nth, opts); 122 synproxy_build_options(nth, opts);
121 123
122 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 124 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
123 niph, nth, tcp_hdr_size); 125 niph, nth, tcp_hdr_size);
124} 126}
125 127
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
163 165
164 synproxy_build_options(nth, opts); 166 synproxy_build_options(nth, opts);
165 167
166 synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 168 synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
167 niph, nth, tcp_hdr_size); 169 niph, nth, tcp_hdr_size);
168} 170}
169 171
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
203 205
204 synproxy_build_options(nth, opts); 206 synproxy_build_options(nth, opts);
205 207
206 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 208 synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
207} 209}
208 210
209static void 211static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
241 243
242 synproxy_build_options(nth, opts); 244 synproxy_build_options(nth, opts);
243 245
244 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 246 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
247 niph, nth, tcp_hdr_size);
245} 248}
246 249
247static bool 250static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
301 XT_SYNPROXY_OPT_SACK_PERM | 304 XT_SYNPROXY_OPT_SACK_PERM |
302 XT_SYNPROXY_OPT_ECN); 305 XT_SYNPROXY_OPT_ECN);
303 306
304 synproxy_send_client_synack(skb, th, &opts); 307 synproxy_send_client_synack(snet, skb, th, &opts);
305 return NF_DROP; 308 return NF_DROP;
306 309
307 } else if (th->ack && !(th->fin || th->rst || th->syn)) { 310 } else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
348 fq->ecn |= ecn; 348 fq->ecn |= ecn;
349 if (payload_len > fq->q.max_size) 349 if (payload_len > fq->q.max_size)
350 fq->q.max_size = payload_len; 350 fq->q.max_size = payload_len;
351 add_frag_mem_limit(&fq->q, skb->truesize); 351 add_frag_mem_limit(fq->q.net, skb->truesize);
352 352
353 /* The first fragment. 353 /* The first fragment.
354 * nhoffset is obtained from the first fragment, of course. 354 * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
430 clone->ip_summed = head->ip_summed; 430 clone->ip_summed = head->ip_summed;
431 431
432 NFCT_FRAG6_CB(clone)->orig = NULL; 432 NFCT_FRAG6_CB(clone)->orig = NULL;
433 add_frag_mem_limit(&fq->q, clone->truesize); 433 add_frag_mem_limit(fq->q.net, clone->truesize);
434 } 434 }
435 435
436 /* We have to remove fragment header from datagram and to relocate 436 /* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
454 head->csum = csum_add(head->csum, fp->csum); 454 head->csum = csum_add(head->csum, fp->csum);
455 head->truesize += fp->truesize; 455 head->truesize += fp->truesize;
456 } 456 }
457 sub_frag_mem_limit(&fq->q, head->truesize); 457 sub_frag_mem_limit(fq->q.net, head->truesize);
458 458
459 head->ignore_df = 1; 459 head->ignore_df = 1;
460 head->next = NULL; 460 head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
144 144
145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
146 146
147 if (fq->q.flags & INET_FRAG_EVICTED) 147 if (inet_frag_evicting(&fq->q))
148 goto out_rcu_unlock; 148 goto out_rcu_unlock;
149 149
150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); 150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
330 fq->q.stamp = skb->tstamp; 330 fq->q.stamp = skb->tstamp;
331 fq->q.meat += skb->len; 331 fq->q.meat += skb->len;
332 fq->ecn |= ecn; 332 fq->ecn |= ecn;
333 add_frag_mem_limit(&fq->q, skb->truesize); 333 add_frag_mem_limit(fq->q.net, skb->truesize);
334 334
335 /* The first fragment. 335 /* The first fragment.
336 * nhoffset is obtained from the first fragment, of course. 336 * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
443 head->len -= clone->len; 443 head->len -= clone->len;
444 clone->csum = 0; 444 clone->csum = 0;
445 clone->ip_summed = head->ip_summed; 445 clone->ip_summed = head->ip_summed;
446 add_frag_mem_limit(&fq->q, clone->truesize); 446 add_frag_mem_limit(fq->q.net, clone->truesize);
447 } 447 }
448 448
449 /* We have to remove fragment header from datagram and to relocate 449 /* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
481 } 481 }
482 fp = next; 482 fp = next;
483 } 483 }
484 sub_frag_mem_limit(&fq->q, sum_truesize); 484 sub_frag_mem_limit(fq->q.net, sum_truesize);
485 485
486 head->next = NULL; 486 head->next = NULL;
487 head->dev = dev; 487 head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1a1122a6bbf5..d15586490cec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
318/* allocate dst with ip6_dst_ops */ 318/* allocate dst with ip6_dst_ops */
319static struct rt6_info *__ip6_dst_alloc(struct net *net, 319static struct rt6_info *__ip6_dst_alloc(struct net *net,
320 struct net_device *dev, 320 struct net_device *dev,
321 int flags, 321 int flags)
322 struct fib6_table *table)
323{ 322{
324 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 323 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
325 0, DST_OBSOLETE_FORCE_CHK, flags); 324 0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
336 335
337static struct rt6_info *ip6_dst_alloc(struct net *net, 336static struct rt6_info *ip6_dst_alloc(struct net *net,
338 struct net_device *dev, 337 struct net_device *dev,
339 int flags, 338 int flags)
340 struct fib6_table *table)
341{ 339{
342 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table); 340 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
343 341
344 if (rt) { 342 if (rt) {
345 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); 343 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -369,10 +367,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
369 struct inet6_dev *idev; 367 struct inet6_dev *idev;
370 368
371 dst_destroy_metrics_generic(dst); 369 dst_destroy_metrics_generic(dst);
372 370 free_percpu(rt->rt6i_pcpu);
373 if (rt->rt6i_pcpu)
374 free_percpu(rt->rt6i_pcpu);
375
376 rt6_uncached_list_del(rt); 371 rt6_uncached_list_del(rt);
377 372
378 idev = rt->rt6i_idev; 373 idev = rt->rt6i_idev;
@@ -953,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
953 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) 948 if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
954 ort = (struct rt6_info *)ort->dst.from; 949 ort = (struct rt6_info *)ort->dst.from;
955 950
956 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 951 rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
957 0, ort->rt6i_table);
958 952
959 if (!rt) 953 if (!rt)
960 return NULL; 954 return NULL;
@@ -986,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
986 struct rt6_info *pcpu_rt; 980 struct rt6_info *pcpu_rt;
987 981
988 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), 982 pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
989 rt->dst.dev, rt->dst.flags, 983 rt->dst.dev, rt->dst.flags);
990 rt->rt6i_table);
991 984
992 if (!pcpu_rt) 985 if (!pcpu_rt)
993 return NULL; 986 return NULL;
@@ -1000,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
1000/* It should be called with read_lock_bh(&tb6_lock) acquired */ 993/* It should be called with read_lock_bh(&tb6_lock) acquired */
1001static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) 994static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
1002{ 995{
1003 struct rt6_info *pcpu_rt, *prev, **p; 996 struct rt6_info *pcpu_rt, **p;
1004 997
1005 p = this_cpu_ptr(rt->rt6i_pcpu); 998 p = this_cpu_ptr(rt->rt6i_pcpu);
1006 pcpu_rt = *p; 999 pcpu_rt = *p;
1007 1000
1008 if (pcpu_rt) 1001 if (pcpu_rt) {
1009 goto done; 1002 dst_hold(&pcpu_rt->dst);
1003 rt6_dst_from_metrics_check(pcpu_rt);
1004 }
1005 return pcpu_rt;
1006}
1007
1008static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
1009{
1010 struct fib6_table *table = rt->rt6i_table;
1011 struct rt6_info *pcpu_rt, *prev, **p;
1010 1012
1011 pcpu_rt = ip6_rt_pcpu_alloc(rt); 1013 pcpu_rt = ip6_rt_pcpu_alloc(rt);
1012 if (!pcpu_rt) { 1014 if (!pcpu_rt) {
1013 struct net *net = dev_net(rt->dst.dev); 1015 struct net *net = dev_net(rt->dst.dev);
1014 1016
1015 pcpu_rt = net->ipv6.ip6_null_entry; 1017 dst_hold(&net->ipv6.ip6_null_entry->dst);
1016 goto done; 1018 return net->ipv6.ip6_null_entry;
1017 } 1019 }
1018 1020
1019 prev = cmpxchg(p, NULL, pcpu_rt); 1021 read_lock_bh(&table->tb6_lock);
1020 if (prev) { 1022 if (rt->rt6i_pcpu) {
1021 /* If someone did it before us, return prev instead */ 1023 p = this_cpu_ptr(rt->rt6i_pcpu);
1024 prev = cmpxchg(p, NULL, pcpu_rt);
1025 if (prev) {
1026 /* If someone did it before us, return prev instead */
1027 dst_destroy(&pcpu_rt->dst);
1028 pcpu_rt = prev;
1029 }
1030 } else {
1031 /* rt has been removed from the fib6 tree
1032 * before we have a chance to acquire the read_lock.
1033 * In this case, don't bother to create a pcpu rt
1034 * since rt is going away anyway. The next
1035 * dst_check() will trigger a re-lookup.
1036 */
1022 dst_destroy(&pcpu_rt->dst); 1037 dst_destroy(&pcpu_rt->dst);
1023 pcpu_rt = prev; 1038 pcpu_rt = rt;
1024 } 1039 }
1025
1026done:
1027 dst_hold(&pcpu_rt->dst); 1040 dst_hold(&pcpu_rt->dst);
1028 rt6_dst_from_metrics_check(pcpu_rt); 1041 rt6_dst_from_metrics_check(pcpu_rt);
1042 read_unlock_bh(&table->tb6_lock);
1029 return pcpu_rt; 1043 return pcpu_rt;
1030} 1044}
1031 1045
@@ -1100,9 +1114,22 @@ redo_rt6_select:
1100 rt->dst.lastuse = jiffies; 1114 rt->dst.lastuse = jiffies;
1101 rt->dst.__use++; 1115 rt->dst.__use++;
1102 pcpu_rt = rt6_get_pcpu_route(rt); 1116 pcpu_rt = rt6_get_pcpu_route(rt);
1103 read_unlock_bh(&table->tb6_lock); 1117
1118 if (pcpu_rt) {
1119 read_unlock_bh(&table->tb6_lock);
1120 } else {
1121 /* We have to do the read_unlock first
1122 * because rt6_make_pcpu_route() may trigger
1123 * ip6_dst_gc() which will take the write_lock.
1124 */
1125 dst_hold(&rt->dst);
1126 read_unlock_bh(&table->tb6_lock);
1127 pcpu_rt = rt6_make_pcpu_route(rt);
1128 dst_release(&rt->dst);
1129 }
1104 1130
1105 return pcpu_rt; 1131 return pcpu_rt;
1132
1106 } 1133 }
1107} 1134}
1108 1135
@@ -1558,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1558 if (unlikely(!idev)) 1585 if (unlikely(!idev))
1559 return ERR_PTR(-ENODEV); 1586 return ERR_PTR(-ENODEV);
1560 1587
1561 rt = ip6_dst_alloc(net, dev, 0, NULL); 1588 rt = ip6_dst_alloc(net, dev, 0);
1562 if (unlikely(!rt)) { 1589 if (unlikely(!rt)) {
1563 in6_dev_put(idev); 1590 in6_dev_put(idev);
1564 dst = ERR_PTR(-ENOMEM); 1591 dst = ERR_PTR(-ENOMEM);
@@ -1745,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
1745 if (!table) 1772 if (!table)
1746 goto out; 1773 goto out;
1747 1774
1748 rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); 1775 rt = ip6_dst_alloc(net, NULL,
1776 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
1749 1777
1750 if (!rt) { 1778 if (!rt) {
1751 err = -ENOMEM; 1779 err = -ENOMEM;
@@ -1834,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
1834 int gwa_type; 1862 int gwa_type;
1835 1863
1836 gw_addr = &cfg->fc_gateway; 1864 gw_addr = &cfg->fc_gateway;
1865 gwa_type = ipv6_addr_type(gw_addr);
1837 1866
1838 /* if gw_addr is local we will fail to detect this in case 1867 /* if gw_addr is local we will fail to detect this in case
1839 * address is still TENTATIVE (DAD in progress). rt6_lookup() 1868 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1841,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
1841 * prefix route was assigned to, which might be non-loopback. 1870 * prefix route was assigned to, which might be non-loopback.
1842 */ 1871 */
1843 err = -EINVAL; 1872 err = -EINVAL;
1844 if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0)) 1873 if (ipv6_chk_addr_and_flags(net, gw_addr,
1874 gwa_type & IPV6_ADDR_LINKLOCAL ?
1875 dev : NULL, 0, 0))
1845 goto out; 1876 goto out;
1846 1877
1847 rt->rt6i_gateway = *gw_addr; 1878 rt->rt6i_gateway = *gw_addr;
1848 gwa_type = ipv6_addr_type(gw_addr);
1849 1879
1850 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { 1880 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1851 struct rt6_info *grt; 1881 struct rt6_info *grt;
@@ -2400,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2400{ 2430{
2401 struct net *net = dev_net(idev->dev); 2431 struct net *net = dev_net(idev->dev);
2402 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 2432 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
2403 DST_NOCOUNT, NULL); 2433 DST_NOCOUNT);
2404 if (!rt) 2434 if (!rt)
2405 return ERR_PTR(-ENOMEM); 2435 return ERR_PTR(-ENOMEM);
2406 2436
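In the route.c hunks above, rt6_make_pcpu_route() installs the freshly allocated per-CPU copy with cmpxchg(p, NULL, pcpu_rt) and, if someone else installed an entry first, destroys its own copy and uses the existing one. That publish-once pattern, sketched with C11 atomics and heap objects standing in for dst entries (a simplified model, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int value;
};

/*
 * Install 'fresh' into *slot only if the slot is still empty. If someone
 * else installed an entry first, drop ours and return the existing one.
 */
static struct entry *publish_once(_Atomic(struct entry *) *slot,
				  struct entry *fresh)
{
	struct entry *expected = NULL;

	if (atomic_compare_exchange_strong(slot, &expected, fresh))
		return fresh;		/* we won: our copy is now published */

	free(fresh);			/* lost the race: discard our copy */
	return expected;		/* 'expected' now holds the winner */
}

int main(void)
{
	_Atomic(struct entry *) slot = NULL;
	struct entry *a = malloc(sizeof(*a));
	struct entry *b = malloc(sizeof(*b));

	a->value = 42;
	b->value = 7;
	printf("%d\n", publish_once(&slot, a)->value);	/* 42: a is installed */
	printf("%d\n", publish_once(&slot, b)->value);	/* 42: b is discarded */

	free(atomic_load(&slot));
	return 0;
}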
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c4277aff..7a6cea5e4274 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
943 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); 943 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
944 if (req) { 944 if (req) {
945 nsk = tcp_check_req(sk, skb, req, false); 945 nsk = tcp_check_req(sk, skb, req, false);
946 if (!nsk) 946 if (!nsk || nsk == sk)
947 reqsk_put(req); 947 reqsk_put(req);
948 return nsk; 948 return nsk;
949 } 949 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
613 if (signal_pending(current)) 613 if (signal_pending(current))
614 break; 614 break;
615 rc = 0; 615 rc = 0;
616 if (sk_wait_data(sk, &timeo)) 616 if (sk_wait_data(sk, &timeo, NULL))
617 break; 617 break;
618 } 618 }
619 return rc; 619 return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
802 release_sock(sk); 802 release_sock(sk);
803 lock_sock(sk); 803 lock_sock(sk);
804 } else 804 } else
805 sk_wait_data(sk, &timeo); 805 sk_wait_data(sk, &timeo, NULL);
806 806
807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { 807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", 808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 29236e832e44..c09c0131bfa2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
723 723
724 debugfs_remove_recursive(sdata->vif.debugfs_dir); 724 debugfs_remove_recursive(sdata->vif.debugfs_dir);
725 sdata->vif.debugfs_dir = NULL; 725 sdata->vif.debugfs_dir = NULL;
726 sdata->debugfs.subdir_stations = NULL;
726} 727}
727 728
728void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) 729void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ed1edac14372..553ac6dd4867 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
1863 ieee80211_teardown_sdata(sdata); 1863 ieee80211_teardown_sdata(sdata);
1864} 1864}
1865 1865
1866/*
1867 * Remove all interfaces, may only be called at hardware unregistration
1868 * time because it doesn't do RCU-safe list removals.
1869 */
1870void ieee80211_remove_interfaces(struct ieee80211_local *local) 1866void ieee80211_remove_interfaces(struct ieee80211_local *local)
1871{ 1867{
1872 struct ieee80211_sub_if_data *sdata, *tmp; 1868 struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1875 1871
1876 ASSERT_RTNL(); 1872 ASSERT_RTNL();
1877 1873
1878 /* 1874 /* Before destroying the interfaces, make sure they're all stopped so
1879 * Close all AP_VLAN interfaces first, as otherwise they 1875 * that the hardware is stopped. Otherwise, the driver might still be
1880 * might be closed while the AP interface they belong to 1876 * iterating the interfaces during the shutdown, e.g. from a worker
1881 * is closed, causing unregister_netdevice_many() to crash. 1877 * or from RX processing or similar, and if it does so (using atomic
1878 * iteration) while we're manipulating the list, the iteration will
1879 * crash.
1880 *
1881 * After this, the hardware should be stopped and the driver should
1882 * have stopped all of its activities, so that we can do RCU-unaware
1883 * manipulations of the interface list below.
1882 */ 1884 */
1883 list_for_each_entry(sdata, &local->interfaces, list) 1885 cfg80211_shutdown_all_interfaces(local->hw.wiphy);
1884 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1886
1885 dev_close(sdata->dev); 1887 WARN(local->open_count, "%s: open count remains %d\n",
1888 wiphy_name(local->hw.wiphy), local->open_count);
1886 1889
1887 mutex_lock(&local->iflist_mtx); 1890 mutex_lock(&local->iflist_mtx);
1888 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1891 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5438d13e2f00..3b59099413fb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) { 306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
307 /* AID */ 307 /* AID */
308 pos = skb_put(skb, 2); 308 pos = skb_put(skb, 2);
309 put_unaligned_le16(plid, pos + 2); 309 put_unaligned_le16(plid, pos);
310 } 310 }
311 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 311 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || 312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
1122 WLAN_SP_MESH_PEERING_CONFIRM) { 1122 WLAN_SP_MESH_PEERING_CONFIRM) {
1123 baseaddr += 4; 1123 baseaddr += 4;
1124 baselen += 4; 1124 baselen += 4;
1125
1126 if (baselen > len)
1127 return;
1125 } 1128 }
1126 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); 1129 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
1127 mesh_process_plink_frame(sdata, mgmt, &elems); 1130 mesh_process_plink_frame(sdata, mgmt, &elems);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 06b60980c62c..b676b9fa707b 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76 if (sdata->vif.type != NL80211_IFTYPE_STATION) 76 if (sdata->vif.type != NL80211_IFTYPE_STATION)
77 continue; 77 continue;
78 ieee80211_mgd_quiesce(sdata); 78 ieee80211_mgd_quiesce(sdata);
79 /* If suspended during TX in progress, and wowlan
80 * is enabled (connection will be active) there
81 * can be a race where the driver is put out
82 * of power-save due to TX and during suspend
83 * dynamic_ps_timer is cancelled and TX packet
84 * is flushed, leaving the driver in ACTIVE even
85 * after resuming until dynamic_ps_timer puts
86 * driver back in DOZE.
87 */
88 if (sdata->u.mgd.associated &&
89 sdata->u.mgd.powersave &&
90 !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
91 local->hw.conf.flags |= IEEE80211_CONF_PS;
92 ieee80211_hw_config(local,
93 IEEE80211_CONF_CHANGE_PS);
94 }
79 } 95 }
80 96
81 err = drv_suspend(local, wowlan); 97 err = drv_suspend(local, wowlan);
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a7f6c2..3ece7d1034c8 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
92static inline void 92static inline void
93minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) 93minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
94{ 94{
95 int j = MAX_THR_RATES; 95 int j;
96 struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats; 96 struct minstrel_rate_stats *tmp_mrs;
97 struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats; 97 struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
98 98
99 while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) > 99 for (j = MAX_THR_RATES; j > 0; --j) {
100 minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
101 j--;
102 tmp_mrs = &mi->r[tp_list[j - 1]].stats; 100 tmp_mrs = &mi->r[tp_list[j - 1]].stats;
101 if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
102 minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
103 break;
103 } 104 }
104 105
105 if (j < MAX_THR_RATES - 1) 106 if (j < MAX_THR_RATES - 1)
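The minstrel fix restructures the comparison loop so tmp_mrs is taken from the entry actually stored at tp_list[j - 1] on every iteration, instead of being seeded from an unrelated rate before the loop. The underlying step, finding where a candidate slots into a short list kept in descending throughput order, in isolation (the throughput numbers below are made up):

#include <stdio.h>

#define MAX_TOP 4

/*
 * tp[] holds a throughput estimate per rate index; top[] lists the current
 * best rate indices in descending throughput order. Returns the slot the
 * candidate would occupy, or MAX_TOP if it beats none of them.
 */
static int find_slot(const int *tp, const int *top, int cand)
{
	int j;

	for (j = MAX_TOP; j > 0; --j) {
		if (tp[cand] <= tp[top[j - 1]])
			break;		/* this entry is at least as good */
	}
	return j;
}

int main(void)
{
	int tp[]  = { 50, 90, 70, 30, 80 };	/* made-up throughputs */
	int top[] = { 1, 4, 2, 0 };		/* i.e. 90, 80, 70, 50 */

	printf("%d\n", find_slot(tp, top, 3));	/* 4: 30 beats nothing */
	printf("%d\n", find_slot(tp, top, 2));	/* 3: 70 ties the entry at slot 2 */
	return 0;
}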
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ad31b2dab4f5..8db6e2994bbc 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
60 struct ieee80211_channel *ch; 60 struct ieee80211_channel *ch;
61 struct cfg80211_chan_def chandef; 61 struct cfg80211_chan_def chandef;
62 int i, subband_start; 62 int i, subband_start;
63 struct wiphy *wiphy = sdata->local->hw.wiphy;
63 64
64 for (i = start; i <= end; i += spacing) { 65 for (i = start; i <= end; i += spacing) {
65 if (!ch_cnt) 66 if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
70 /* we will be active on the channel */ 71 /* we will be active on the channel */
71 cfg80211_chandef_create(&chandef, ch, 72 cfg80211_chandef_create(&chandef, ch,
72 NL80211_CHAN_NO_HT); 73 NL80211_CHAN_NO_HT);
73 if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, 74 if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
74 &chandef, 75 sdata->wdev.iftype)) {
75 sdata->wdev.iftype)) {
76 ch_cnt++; 76 ch_cnt++;
77 /* 77 /*
78 * check if the next channel is also part of 78 * check if the next channel is also part of
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8410bb3bf5e8..b8233505bf9f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1117 queued = true; 1117 queued = true;
1118 info->control.vif = &tx->sdata->vif; 1118 info->control.vif = &tx->sdata->vif;
1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
1121 IEEE80211_TX_CTL_NO_PS_BUFFER |
1122 IEEE80211_TX_STATUS_EOSP;
1121 __skb_queue_tail(&tid_tx->pending, skb); 1123 __skb_queue_tail(&tid_tx->pending, skb);
1122 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1124 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1123 purge_skb = __skb_dequeue(&tid_tx->pending); 1125 purge_skb = __skb_dequeue(&tid_tx->pending);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
319 * return *ignored=0 i.e. ICMP and NF_DROP 319 * return *ignored=0 i.e. ICMP and NF_DROP
320 */ 320 */
321 sched = rcu_dereference(svc->scheduler); 321 sched = rcu_dereference(svc->scheduler);
322 dest = sched->schedule(svc, skb, iph); 322 if (sched) {
323 /* read svc->sched_data after svc->scheduler */
324 smp_rmb();
325 dest = sched->schedule(svc, skb, iph);
326 } else {
327 dest = NULL;
328 }
323 if (!dest) { 329 if (!dest) {
324 IP_VS_DBG(1, "p-schedule: no dest found.\n"); 330 IP_VS_DBG(1, "p-schedule: no dest found.\n");
325 kfree(param.pe_data); 331 kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
467 } 473 }
468 474
469 sched = rcu_dereference(svc->scheduler); 475 sched = rcu_dereference(svc->scheduler);
470 dest = sched->schedule(svc, skb, iph); 476 if (sched) {
477 /* read svc->sched_data after svc->scheduler */
478 smp_rmb();
479 dest = sched->schedule(svc, skb, iph);
480 } else {
481 dest = NULL;
482 }
471 if (dest == NULL) { 483 if (dest == NULL) {
472 IP_VS_DBG(1, "Schedule: no dest found.\n"); 484 IP_VS_DBG(1, "Schedule: no dest found.\n");
473 return NULL; 485 return NULL;
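Both IPVS scheduling paths above now tolerate a NULL svc->scheduler (no scheduler bound) and read svc->sched_data only after the scheduler pointer, which is what the added smp_rmb() enforces. A rough reader-side model using a C11 acquire load in place of the rcu_dereference() plus barrier pair; the structures and names are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stddef.h>

struct scheduler {
	int (*schedule)(void *sched_data);	/* picks a destination, <0 if none */
};

struct service {
	_Atomic(struct scheduler *) sched;	/* published pointer, may be NULL */
	void *sched_data;			/* only meaningful while sched is set */
};

/*
 * Reader side: never dereference a NULL scheduler, and order the sched_data
 * read after the pointer read (the acquire load plays the role of the
 * rcu_dereference() + smp_rmb() pair in the hunk above).
 */
int pick_dest(struct service *svc)
{
	struct scheduler *s = atomic_load_explicit(&svc->sched,
						   memory_order_acquire);

	if (!s)
		return -1;		/* no scheduler bound: no destination */
	return s->schedule(svc->sched_data);
}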
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
842 __ip_vs_dst_cache_reset(dest); 842 __ip_vs_dst_cache_reset(dest);
843 spin_unlock_bh(&dest->dst_lock); 843 spin_unlock_bh(&dest->dst_lock);
844 844
845 sched = rcu_dereference_protected(svc->scheduler, 1);
846 if (add) { 845 if (add) {
847 ip_vs_start_estimator(svc->net, &dest->stats); 846 ip_vs_start_estimator(svc->net, &dest->stats);
848 list_add_rcu(&dest->n_list, &svc->destinations); 847 list_add_rcu(&dest->n_list, &svc->destinations);
849 svc->num_dests++; 848 svc->num_dests++;
850 if (sched->add_dest) 849 sched = rcu_dereference_protected(svc->scheduler, 1);
850 if (sched && sched->add_dest)
851 sched->add_dest(svc, dest); 851 sched->add_dest(svc, dest);
852 } else { 852 } else {
853 if (sched->upd_dest) 853 sched = rcu_dereference_protected(svc->scheduler, 1);
854 if (sched && sched->upd_dest)
854 sched->upd_dest(svc, dest); 855 sched->upd_dest(svc, dest);
855 } 856 }
856} 857}
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1084 struct ip_vs_scheduler *sched; 1085 struct ip_vs_scheduler *sched;
1085 1086
1086 sched = rcu_dereference_protected(svc->scheduler, 1); 1087 sched = rcu_dereference_protected(svc->scheduler, 1);
1087 if (sched->del_dest) 1088 if (sched && sched->del_dest)
1088 sched->del_dest(svc, dest); 1089 sched->del_dest(svc, dest);
1089 } 1090 }
1090} 1091}
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1175 ip_vs_use_count_inc(); 1176 ip_vs_use_count_inc();
1176 1177
1177 /* Lookup the scheduler by 'u->sched_name' */ 1178 /* Lookup the scheduler by 'u->sched_name' */
1178 sched = ip_vs_scheduler_get(u->sched_name); 1179 if (strcmp(u->sched_name, "none")) {
1179 if (sched == NULL) { 1180 sched = ip_vs_scheduler_get(u->sched_name);
1180 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1181 if (!sched) {
1181 ret = -ENOENT; 1182 pr_info("Scheduler module ip_vs_%s not found\n",
1182 goto out_err; 1183 u->sched_name);
1184 ret = -ENOENT;
1185 goto out_err;
1186 }
1183 } 1187 }
1184 1188
1185 if (u->pe_name && *u->pe_name) { 1189 if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1240 spin_lock_init(&svc->stats.lock); 1244 spin_lock_init(&svc->stats.lock);
1241 1245
1242 /* Bind the scheduler */ 1246 /* Bind the scheduler */
1243 ret = ip_vs_bind_scheduler(svc, sched); 1247 if (sched) {
1244 if (ret) 1248 ret = ip_vs_bind_scheduler(svc, sched);
1245 goto out_err; 1249 if (ret)
1246 sched = NULL; 1250 goto out_err;
1251 sched = NULL;
1252 }
1247 1253
1248 /* Bind the ct retriever */ 1254 /* Bind the ct retriever */
1249 RCU_INIT_POINTER(svc->pe, pe); 1255 RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1291static int 1297static int
1292ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) 1298ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1293{ 1299{
1294 struct ip_vs_scheduler *sched, *old_sched; 1300 struct ip_vs_scheduler *sched = NULL, *old_sched;
1295 struct ip_vs_pe *pe = NULL, *old_pe = NULL; 1301 struct ip_vs_pe *pe = NULL, *old_pe = NULL;
1296 int ret = 0; 1302 int ret = 0;
1297 1303
1298 /* 1304 /*
1299 * Lookup the scheduler, by 'u->sched_name' 1305 * Lookup the scheduler, by 'u->sched_name'
1300 */ 1306 */
1301 sched = ip_vs_scheduler_get(u->sched_name); 1307 if (strcmp(u->sched_name, "none")) {
1302 if (sched == NULL) { 1308 sched = ip_vs_scheduler_get(u->sched_name);
1303 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1309 if (!sched) {
1304 return -ENOENT; 1310 pr_info("Scheduler module ip_vs_%s not found\n",
1311 u->sched_name);
1312 return -ENOENT;
1313 }
1305 } 1314 }
1306 old_sched = sched; 1315 old_sched = sched;
1307 1316
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1329 1338
1330 old_sched = rcu_dereference_protected(svc->scheduler, 1); 1339 old_sched = rcu_dereference_protected(svc->scheduler, 1);
1331 if (sched != old_sched) { 1340 if (sched != old_sched) {
1341 if (old_sched) {
1342 ip_vs_unbind_scheduler(svc, old_sched);
1343 RCU_INIT_POINTER(svc->scheduler, NULL);
1344 /* Wait all svc->sched_data users */
1345 synchronize_rcu();
1346 }
1332 /* Bind the new scheduler */ 1347 /* Bind the new scheduler */
1333 ret = ip_vs_bind_scheduler(svc, sched); 1348 if (sched) {
1334 if (ret) { 1349 ret = ip_vs_bind_scheduler(svc, sched);
1335 old_sched = sched; 1350 if (ret) {
1336 goto out; 1351 ip_vs_scheduler_put(sched);
1352 goto out;
1353 }
1337 } 1354 }
1338 /* Unbind the old scheduler on success */
1339 ip_vs_unbind_scheduler(svc, old_sched);
1340 } 1355 }
1341 1356
1342 /* 1357 /*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1982 const struct ip_vs_iter *iter = seq->private; 1997 const struct ip_vs_iter *iter = seq->private;
1983 const struct ip_vs_dest *dest; 1998 const struct ip_vs_dest *dest;
1984 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); 1999 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
2000 char *sched_name = sched ? sched->name : "none";
1985 2001
1986 if (iter->table == ip_vs_svc_table) { 2002 if (iter->table == ip_vs_svc_table) {
1987#ifdef CONFIG_IP_VS_IPV6 2003#ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1990 ip_vs_proto_name(svc->protocol), 2006 ip_vs_proto_name(svc->protocol),
1991 &svc->addr.in6, 2007 &svc->addr.in6,
1992 ntohs(svc->port), 2008 ntohs(svc->port),
1993 sched->name); 2009 sched_name);
1994 else 2010 else
1995#endif 2011#endif
1996 seq_printf(seq, "%s %08X:%04X %s %s ", 2012 seq_printf(seq, "%s %08X:%04X %s %s ",
1997 ip_vs_proto_name(svc->protocol), 2013 ip_vs_proto_name(svc->protocol),
1998 ntohl(svc->addr.ip), 2014 ntohl(svc->addr.ip),
1999 ntohs(svc->port), 2015 ntohs(svc->port),
2000 sched->name, 2016 sched_name,
2001 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2017 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2002 } else { 2018 } else {
2003 seq_printf(seq, "FWM %08X %s %s", 2019 seq_printf(seq, "FWM %08X %s %s",
2004 svc->fwmark, sched->name, 2020 svc->fwmark, sched_name,
2005 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2021 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2006 } 2022 }
2007 2023
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2427{ 2443{
2428 struct ip_vs_scheduler *sched; 2444 struct ip_vs_scheduler *sched;
2429 struct ip_vs_kstats kstats; 2445 struct ip_vs_kstats kstats;
2446 char *sched_name;
2430 2447
2431 sched = rcu_dereference_protected(src->scheduler, 1); 2448 sched = rcu_dereference_protected(src->scheduler, 1);
2449 sched_name = sched ? sched->name : "none";
2432 dst->protocol = src->protocol; 2450 dst->protocol = src->protocol;
2433 dst->addr = src->addr.ip; 2451 dst->addr = src->addr.ip;
2434 dst->port = src->port; 2452 dst->port = src->port;
2435 dst->fwmark = src->fwmark; 2453 dst->fwmark = src->fwmark;
2436 strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name)); 2454 strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
2437 dst->flags = src->flags; 2455 dst->flags = src->flags;
2438 dst->timeout = src->timeout / HZ; 2456 dst->timeout = src->timeout / HZ;
2439 dst->netmask = src->netmask; 2457 dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2892 struct ip_vs_flags flags = { .flags = svc->flags, 2910 struct ip_vs_flags flags = { .flags = svc->flags,
2893 .mask = ~0 }; 2911 .mask = ~0 };
2894 struct ip_vs_kstats kstats; 2912 struct ip_vs_kstats kstats;
2913 char *sched_name;
2895 2914
2896 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); 2915 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2897 if (!nl_service) 2916 if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2910 } 2929 }
2911 2930
2912 sched = rcu_dereference_protected(svc->scheduler, 1); 2931 sched = rcu_dereference_protected(svc->scheduler, 1);
2932 sched_name = sched ? sched->name : "none";
2913 pe = rcu_dereference_protected(svc->pe, 1); 2933 pe = rcu_dereference_protected(svc->pe, 1);
2914 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) || 2934 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
2915 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) || 2935 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
2916 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || 2936 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
2917 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || 2937 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
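Note: the ip_vs_ctl.c hunks above let a virtual service run with no scheduler at all — "none" maps to a NULL svc->scheduler, the edit path unbinds the old scheduler (waiting for RCU readers) before optionally binding a new one, and the /proc and netlink dumps fall back to the literal string "none". Below is a minimal user-space sketch of that NULL-tolerant swap and lookup; the struct and function names are simplified stand-ins, not the kernel API.

#include <stdio.h>

struct sched   { const char *name; };
struct service { struct sched *scheduler; };

/* Mirrors the sched_name fallback used by the reporting paths. */
static const char *sched_name(const struct service *svc)
{
        return svc->scheduler ? svc->scheduler->name : "none";
}

/* Simplified edit path: unbind first, then bind only if a real scheduler
 * was requested.  The kernel additionally calls synchronize_rcu() between
 * the two steps so old sched_data users are gone before rebinding.
 */
static void edit_service(struct service *svc, struct sched *new_sched)
{
        if (new_sched == svc->scheduler)
                return;
        svc->scheduler = NULL;          /* unbind the old scheduler, if any */
        if (new_sched)
                svc->scheduler = new_sched;
}

int main(void)
{
        struct sched rr = { "rr" };
        struct service svc = { &rr };

        edit_service(&svc, NULL);       /* user asked for "none" */
        printf("scheduler: %s\n", sched_name(&svc));
        return 0;
}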
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
74 74
75 if (sched->done_service) 75 if (sched->done_service)
76 sched->done_service(svc); 76 sched->done_service(svc);
77 /* svc->scheduler can not be set to NULL */ 77 /* svc->scheduler can be set to NULL only by caller */
78} 78}
79 79
80 80
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
147 147
148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) 148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
149{ 149{
150 struct ip_vs_scheduler *sched; 150 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
151 char *sched_name = sched ? sched->name : "none";
151 152
152 sched = rcu_dereference(svc->scheduler);
153 if (svc->fwmark) { 153 if (svc->fwmark) {
154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n", 154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
155 sched->name, svc->fwmark, svc->fwmark, msg); 155 sched_name, svc->fwmark, svc->fwmark, msg);
156#ifdef CONFIG_IP_VS_IPV6 156#ifdef CONFIG_IP_VS_IPV6
157 } else if (svc->af == AF_INET6) { 157 } else if (svc->af == AF_INET6) {
158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", 158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
159 sched->name, ip_vs_proto_name(svc->protocol), 159 sched_name, ip_vs_proto_name(svc->protocol),
160 &svc->addr.in6, ntohs(svc->port), msg); 160 &svc->addr.in6, ntohs(svc->port), msg);
161#endif 161#endif
162 } else { 162 } else {
163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n", 163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
164 sched->name, ip_vs_proto_name(svc->protocol), 164 sched_name, ip_vs_proto_name(svc->protocol),
165 &svc->addr.ip, ntohs(svc->port), msg); 165 &svc->addr.ip, ntohs(svc->port), msg);
166 } 166 }
167} 167}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
612 pkts = atomic_add_return(1, &cp->in_pkts); 612 pkts = atomic_add_return(1, &cp->in_pkts);
613 else 613 else
614 pkts = sysctl_sync_threshold(ipvs); 614 pkts = sysctl_sync_threshold(ipvs);
615 ip_vs_sync_conn(net, cp->control, pkts); 615 ip_vs_sync_conn(net, cp, pkts);
616 } 616 }
617} 617}
618 618
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
130 130
131 memset(&fl4, 0, sizeof(fl4)); 131 memset(&fl4, 0, sizeof(fl4));
132 fl4.daddr = daddr; 132 fl4.daddr = daddr;
133 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
134 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 133 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
135 FLOWI_FLAG_KNOWN_NH : 0; 134 FLOWI_FLAG_KNOWN_NH : 0;
136 135
@@ -505,6 +504,13 @@ err_put:
505 return -1; 504 return -1;
506 505
507err_unreach: 506err_unreach:
507 /* The ip6_link_failure function requires the dev field to be set
508 * in order to get the net (further for the sake of fwmark
509 * reflection).
510 */
511 if (!skb->dev)
512 skb->dev = skb_dst(skb)->dev;
513
508 dst_link_failure(skb); 514 dst_link_failure(skb);
509 return -1; 515 return -1;
510} 516}
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
523 if (ret == NF_ACCEPT) { 529 if (ret == NF_ACCEPT) {
524 nf_reset(skb); 530 nf_reset(skb);
525 skb_forward_csum(skb); 531 skb_forward_csum(skb);
532 if (!skb->sk)
533 skb_sender_cpu_clear(skb);
526 } 534 }
527 return ret; 535 return ret;
528} 536}
529 537
538/* In the event of a remote destination, it's possible that we would have
539 * matches against an old socket (particularly a TIME-WAIT socket). This
540 * causes havoc down the line (ip_local_out et. al. expect regular sockets
541 * and invalid memory accesses will happen) so simply drop the association
542 * in this case.
543*/
544static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
545{
546 /* If dev is set, the packet came from the LOCAL_IN callback and
547 * not from a local TCP socket.
548 */
549 if (skb->dev)
550 skb_orphan(skb);
551}
552
530/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */ 553/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
531static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, 554static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
532 struct ip_vs_conn *cp, int local) 555 struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
538 ip_vs_notrack(skb); 561 ip_vs_notrack(skb);
539 else 562 else
540 ip_vs_update_conntrack(skb, cp, 1); 563 ip_vs_update_conntrack(skb, cp, 1);
564
565 /* Remove the early_demux association unless it's bound for the
566 * exact same port and address on this host after translation.
567 */
568 if (!local || cp->vport != cp->dport ||
569 !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
570 ip_vs_drop_early_demux_sk(skb);
571
541 if (!local) { 572 if (!local) {
542 skb_forward_csum(skb); 573 skb_forward_csum(skb);
574 if (!skb->sk)
575 skb_sender_cpu_clear(skb);
543 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 576 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
544 NULL, skb_dst(skb)->dev, dst_output_sk); 577 NULL, skb_dst(skb)->dev, dst_output_sk);
545 } else 578 } else
546 ret = NF_ACCEPT; 579 ret = NF_ACCEPT;
580
547 return ret; 581 return ret;
548} 582}
549 583
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
557 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) 591 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
558 ip_vs_notrack(skb); 592 ip_vs_notrack(skb);
559 if (!local) { 593 if (!local) {
594 ip_vs_drop_early_demux_sk(skb);
560 skb_forward_csum(skb); 595 skb_forward_csum(skb);
596 if (!skb->sk)
597 skb_sender_cpu_clear(skb);
561 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 598 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
562 NULL, skb_dst(skb)->dev, dst_output_sk); 599 NULL, skb_dst(skb)->dev, dst_output_sk);
563 } else 600 } else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
845 struct ipv6hdr *old_ipv6h = NULL; 882 struct ipv6hdr *old_ipv6h = NULL;
846#endif 883#endif
847 884
885 ip_vs_drop_early_demux_sk(skb);
886
848 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { 887 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
849 new_skb = skb_realloc_headroom(skb, max_headroom); 888 new_skb = skb_realloc_headroom(skb, max_headroom);
850 if (!new_skb) 889 if (!new_skb)
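Note: the ip_vs_xmit.c comment above explains why a forwarded packet must not keep a socket attached by early demux — it may be a stale TIME-WAIT socket, and later code expects full sockets. A reduced model of the added guard follows, assuming simplified stand-in structs rather than the real sk_buff/ip_vs_conn.

#include <stdbool.h>
#include <stdio.h>

struct skb  { void *dev; void *sk; };
struct conn { unsigned short vport, dport; unsigned int vaddr, daddr; };

/* The kernel calls skb_orphan() here; clearing the pointer is the
 * user-space equivalent for this sketch.  A set dev means the packet came
 * in through LOCAL_IN rather than from a local TCP socket.
 */
static void drop_early_demux_sk(struct skb *skb)
{
        if (skb->dev)
                skb->sk = NULL;
}

/* NAT path: keep the association only when the packet stays local and is
 * delivered to the very same address/port after translation.
 */
static void nat_send(struct skb *skb, const struct conn *cp, bool local)
{
        if (!local || cp->vport != cp->dport || cp->vaddr != cp->daddr)
                drop_early_demux_sk(skb);
}

int main(void)
{
        struct skb skb = { (void *)1, (void *)1 };
        struct conn cp = { 80, 8080, 1, 2 };

        nat_send(&skb, &cp, true);
        printf("early-demux socket kept: %s\n", skb.sk ? "yes" : "no");
        return 0;
}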
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..3c20d02aee73 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
287 spin_unlock(&pcpu->lock); 287 spin_unlock(&pcpu->lock);
288} 288}
289 289
290/* Released via destroy_conntrack() */
291struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
292{
293 struct nf_conn *tmpl;
294
295 tmpl = kzalloc(sizeof(*tmpl), flags);
296 if (tmpl == NULL)
297 return NULL;
298
299 tmpl->status = IPS_TEMPLATE;
300 write_pnet(&tmpl->ct_net, net);
301
302#ifdef CONFIG_NF_CONNTRACK_ZONES
303 if (zone) {
304 struct nf_conntrack_zone *nf_ct_zone;
305
306 nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
307 if (!nf_ct_zone)
308 goto out_free;
309 nf_ct_zone->id = zone;
310 }
311#endif
312 atomic_set(&tmpl->ct_general.use, 0);
313
314 return tmpl;
315#ifdef CONFIG_NF_CONNTRACK_ZONES
316out_free:
317 kfree(tmpl);
318 return NULL;
319#endif
320}
321EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
322
323static void nf_ct_tmpl_free(struct nf_conn *tmpl)
324{
325 nf_ct_ext_destroy(tmpl);
326 nf_ct_ext_free(tmpl);
327 kfree(tmpl);
328}
329
290static void 330static void
291destroy_conntrack(struct nf_conntrack *nfct) 331destroy_conntrack(struct nf_conntrack *nfct)
292{ 332{
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
298 NF_CT_ASSERT(atomic_read(&nfct->use) == 0); 338 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
299 NF_CT_ASSERT(!timer_pending(&ct->timeout)); 339 NF_CT_ASSERT(!timer_pending(&ct->timeout));
300 340
341 if (unlikely(nf_ct_is_template(ct))) {
342 nf_ct_tmpl_free(ct);
343 return;
344 }
301 rcu_read_lock(); 345 rcu_read_lock();
302 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 346 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
303 if (l4proto && l4proto->destroy) 347 if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
540} 584}
541EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); 585EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
542 586
543/* deletion from this larval template list happens via nf_ct_put() */
544void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
545{
546 struct ct_pcpu *pcpu;
547
548 __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
549 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
550 nf_conntrack_get(&tmpl->ct_general);
551
552 /* add this conntrack to the (per cpu) tmpl list */
553 local_bh_disable();
554 tmpl->cpu = smp_processor_id();
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
556
557 spin_lock(&pcpu->lock);
558 /* Overload tuple linked list to put us in template list. */
559 hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
560 &pcpu->tmpl);
561 spin_unlock_bh(&pcpu->lock);
562}
563EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
564
565/* Confirm a connection given skb; places it in hash table */ 587/* Confirm a connection given skb; places it in hash table */
566int 588int
567__nf_conntrack_confirm(struct sk_buff *skb) 589__nf_conntrack_confirm(struct sk_buff *skb)
@@ -1522,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1522 sz = nr_slots * sizeof(struct hlist_nulls_head); 1544 sz = nr_slots * sizeof(struct hlist_nulls_head);
1523 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 1545 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1524 get_order(sz)); 1546 get_order(sz));
1525 if (!hash) { 1547 if (!hash)
1526 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1527 hash = vzalloc(sz); 1548 hash = vzalloc(sz);
1528 }
1529 1549
1530 if (hash && nulls) 1550 if (hash && nulls)
1531 for (i = 0; i < nr_slots; i++) 1551 for (i = 0; i < nr_slots; i++)
@@ -1751,7 +1771,6 @@ int nf_conntrack_init_net(struct net *net)
1751 spin_lock_init(&pcpu->lock); 1771 spin_lock_init(&pcpu->lock);
1752 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); 1772 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1753 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); 1773 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1754 INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
1755 } 1774 }
1756 1775
1757 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1776 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
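Note: nf_conntrack_core.c now gives conntrack templates their own allocator — nf_ct_tmpl_alloc() returns a zeroed nf_conn flagged IPS_TEMPLATE, the per-CPU template list (and nf_conntrack_tmpl_insert) goes away, and destroy_conntrack() frees templates through nf_ct_tmpl_free() instead of the normal teardown. A user-space sketch of that branch on the template bit; the bit value below is a placeholder, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

#define IPS_TEMPLATE (1u << 0)          /* placeholder bit, illustration only */

struct conn { unsigned int status; };

static struct conn *tmpl_alloc(void)
{
        struct conn *t = calloc(1, sizeof(*t));

        if (t)
                t->status = IPS_TEMPLATE;   /* marks it for the short destroy path */
        return t;
}

static void destroy_conn(struct conn *ct)
{
        if (ct->status & IPS_TEMPLATE) {    /* templates skip list removal, timers, ... */
                free(ct);
                return;
        }
        /* regular conntrack teardown would run here */
        free(ct);
}

int main(void)
{
        struct conn *tmpl = tmpl_alloc();

        destroy_conn(tmpl);
        puts("template released");
        return 0;
}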
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; 219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
220 } 220 }
221 221
222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); 222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
223 nf_ct_zone(a->master) == nf_ct_zone(b->master);
223} 224}
224 225
225static inline int expect_matches(const struct nf_conntrack_expect *a, 226static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2995 } 2995 }
2996 2996
2997 err = nf_ct_expect_related_report(exp, portid, report); 2997 err = nf_ct_expect_related_report(exp, portid, report);
2998 if (err < 0)
2999 goto err_exp;
3000
3001 return 0;
3002err_exp:
3003 nf_ct_expect_put(exp); 2998 nf_ct_expect_put(exp);
3004err_ct: 2999err_ct:
3005 nf_ct_put(ct); 3000 nf_ct_put(ct);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index cd60d397fe05..8a8b2abc35ff 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -213,7 +213,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
213 213
214 if (verdict == NF_ACCEPT) { 214 if (verdict == NF_ACCEPT) {
215 next_hook: 215 next_hook:
216 verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], 216 verdict = nf_iterate(entry->state.hook_list,
217 skb, &entry->state, &elem); 217 skb, &entry->state, &elem);
218 } 218 }
219 219
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..d7f168527903 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,23 +349,20 @@ static void __net_exit synproxy_proc_exit(struct net *net)
349static int __net_init synproxy_net_init(struct net *net) 349static int __net_init synproxy_net_init(struct net *net)
350{ 350{
351 struct synproxy_net *snet = synproxy_pernet(net); 351 struct synproxy_net *snet = synproxy_pernet(net);
352 struct nf_conntrack_tuple t;
353 struct nf_conn *ct; 352 struct nf_conn *ct;
354 int err = -ENOMEM; 353 int err = -ENOMEM;
355 354
356 memset(&t, 0, sizeof(t)); 355 ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
357 ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL); 356 if (!ct)
358 if (IS_ERR(ct)) {
359 err = PTR_ERR(ct);
360 goto err1; 357 goto err1;
361 }
362 358
363 if (!nfct_seqadj_ext_add(ct)) 359 if (!nfct_seqadj_ext_add(ct))
364 goto err2; 360 goto err2;
365 if (!nfct_synproxy_ext_add(ct)) 361 if (!nfct_synproxy_ext_add(ct))
366 goto err2; 362 goto err2;
367 363
368 nf_conntrack_tmpl_insert(net, ct); 364 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
365 nf_conntrack_get(&ct->ct_general);
369 snet->tmpl = ct; 366 snet->tmpl = ct;
370 367
371 snet->stats = alloc_percpu(struct synproxy_stats); 368 snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 8b117c90ecd7..0c0e8ecf02ab 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -269,6 +269,12 @@ static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
269 } 269 }
270} 270}
271 271
272enum {
273 NFNL_BATCH_FAILURE = (1 << 0),
274 NFNL_BATCH_DONE = (1 << 1),
275 NFNL_BATCH_REPLAY = (1 << 2),
276};
277
272static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, 278static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
273 u_int16_t subsys_id) 279 u_int16_t subsys_id)
274{ 280{
@@ -276,13 +282,15 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
276 struct net *net = sock_net(skb->sk); 282 struct net *net = sock_net(skb->sk);
277 const struct nfnetlink_subsystem *ss; 283 const struct nfnetlink_subsystem *ss;
278 const struct nfnl_callback *nc; 284 const struct nfnl_callback *nc;
279 bool success = true, done = false;
280 static LIST_HEAD(err_list); 285 static LIST_HEAD(err_list);
286 u32 status;
281 int err; 287 int err;
282 288
283 if (subsys_id >= NFNL_SUBSYS_COUNT) 289 if (subsys_id >= NFNL_SUBSYS_COUNT)
284 return netlink_ack(skb, nlh, -EINVAL); 290 return netlink_ack(skb, nlh, -EINVAL);
285replay: 291replay:
292 status = 0;
293
286 skb = netlink_skb_clone(oskb, GFP_KERNEL); 294 skb = netlink_skb_clone(oskb, GFP_KERNEL);
287 if (!skb) 295 if (!skb)
288 return netlink_ack(oskb, nlh, -ENOMEM); 296 return netlink_ack(oskb, nlh, -ENOMEM);
@@ -336,10 +344,10 @@ replay:
336 if (type == NFNL_MSG_BATCH_BEGIN) { 344 if (type == NFNL_MSG_BATCH_BEGIN) {
337 /* Malformed: Batch begin twice */ 345 /* Malformed: Batch begin twice */
338 nfnl_err_reset(&err_list); 346 nfnl_err_reset(&err_list);
339 success = false; 347 status |= NFNL_BATCH_FAILURE;
340 goto done; 348 goto done;
341 } else if (type == NFNL_MSG_BATCH_END) { 349 } else if (type == NFNL_MSG_BATCH_END) {
342 done = true; 350 status |= NFNL_BATCH_DONE;
343 goto done; 351 goto done;
344 } else if (type < NLMSG_MIN_TYPE) { 352 } else if (type < NLMSG_MIN_TYPE) {
345 err = -EINVAL; 353 err = -EINVAL;
@@ -382,11 +390,8 @@ replay:
382 * original skb. 390 * original skb.
383 */ 391 */
384 if (err == -EAGAIN) { 392 if (err == -EAGAIN) {
385 nfnl_err_reset(&err_list); 393 status |= NFNL_BATCH_REPLAY;
386 ss->abort(oskb); 394 goto next;
387 nfnl_unlock(subsys_id);
388 kfree_skb(skb);
389 goto replay;
390 } 395 }
391 } 396 }
392ack: 397ack:
@@ -402,7 +407,7 @@ ack:
402 */ 407 */
403 nfnl_err_reset(&err_list); 408 nfnl_err_reset(&err_list);
404 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM); 409 netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
405 success = false; 410 status |= NFNL_BATCH_FAILURE;
406 goto done; 411 goto done;
407 } 412 }
408 /* We don't stop processing the batch on errors, thus, 413 /* We don't stop processing the batch on errors, thus,
@@ -410,19 +415,26 @@ ack:
410 * triggers. 415 * triggers.
411 */ 416 */
412 if (err) 417 if (err)
413 success = false; 418 status |= NFNL_BATCH_FAILURE;
414 } 419 }
415 420next:
416 msglen = NLMSG_ALIGN(nlh->nlmsg_len); 421 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
417 if (msglen > skb->len) 422 if (msglen > skb->len)
418 msglen = skb->len; 423 msglen = skb->len;
419 skb_pull(skb, msglen); 424 skb_pull(skb, msglen);
420 } 425 }
421done: 426done:
422 if (success && done) 427 if (status & NFNL_BATCH_REPLAY) {
428 ss->abort(oskb);
429 nfnl_err_reset(&err_list);
430 nfnl_unlock(subsys_id);
431 kfree_skb(skb);
432 goto replay;
433 } else if (status == NFNL_BATCH_DONE) {
423 ss->commit(oskb); 434 ss->commit(oskb);
424 else 435 } else {
425 ss->abort(oskb); 436 ss->abort(oskb);
437 }
426 438
427 nfnl_err_deliver(&err_list, oskb); 439 nfnl_err_deliver(&err_list, oskb);
428 nfnl_unlock(subsys_id); 440 nfnl_unlock(subsys_id);
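Note: the nfnetlink.c rework replaces the success/done booleans with the NFNL_BATCH_* bit set shown above and makes the replay/commit/abort decision once, after the whole batch has been walked (an -EAGAIN no longer aborts mid-loop). A compact model of that final decision, reusing the enum values from the patch:

#include <stdio.h>

enum {
        NFNL_BATCH_FAILURE = 1 << 0,
        NFNL_BATCH_DONE    = 1 << 1,
        NFNL_BATCH_REPLAY  = 1 << 2,
};

/* One decision at the end of the loop: replay wins, a clean DONE commits,
 * anything else (a failure, or a batch that never saw BATCH_END) aborts.
 */
static const char *batch_outcome(unsigned int status)
{
        if (status & NFNL_BATCH_REPLAY)
                return "replay";
        if (status == NFNL_BATCH_DONE)
                return "commit";
        return "abort";
}

int main(void)
{
        printf("%s\n", batch_outcome(NFNL_BATCH_DONE));
        printf("%s\n", batch_outcome(NFNL_BATCH_DONE | NFNL_BATCH_FAILURE));
        printf("%s\n", batch_outcome(NFNL_BATCH_REPLAY));
        return 0;
}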
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..43ddeee404e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
184static int xt_ct_tg_check(const struct xt_tgchk_param *par, 184static int xt_ct_tg_check(const struct xt_tgchk_param *par,
185 struct xt_ct_target_info_v1 *info) 185 struct xt_ct_target_info_v1 *info)
186{ 186{
187 struct nf_conntrack_tuple t;
188 struct nf_conn *ct; 187 struct nf_conn *ct;
189 int ret = -EOPNOTSUPP; 188 int ret = -EOPNOTSUPP;
190 189
@@ -202,11 +201,11 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
202 if (ret < 0) 201 if (ret < 0)
203 goto err1; 202 goto err1;
204 203
205 memset(&t, 0, sizeof(t)); 204 ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
206 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); 205 if (!ct) {
207 ret = PTR_ERR(ct); 206 ret = -ENOMEM;
208 if (IS_ERR(ct))
209 goto err2; 207 goto err2;
208 }
210 209
211 ret = 0; 210 ret = 0;
212 if ((info->ct_events || info->exp_events) && 211 if ((info->ct_events || info->exp_events) &&
@@ -227,8 +226,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
227 if (ret < 0) 226 if (ret < 0)
228 goto err3; 227 goto err3;
229 } 228 }
230 229 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
231 nf_conntrack_tmpl_insert(par->net, ct); 230 nf_conntrack_get(&ct->ct_general);
232out: 231out:
233 info->ct = ct; 232 info->ct = ct;
234 return 0; 233 return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
126 goto out; 126 goto out;
127 } 127 }
128 128
129 sysfs_attr_init(&info->timer->attr.attr);
129 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); 130 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
130 if (!info->timer->attr.attr.name) { 131 if (!info->timer->attr.attr.name) {
131 ret = -ENOMEM; 132 ret = -ENOMEM;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index dea925388a5b..67d210477863 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -158,7 +158,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
158out: 158out:
159 spin_unlock(&netlink_tap_lock); 159 spin_unlock(&netlink_tap_lock);
160 160
161 if (found && nt->module) 161 if (found)
162 module_put(nt->module); 162 module_put(nt->module);
163 163
164 return found ? 0 : -ENODEV; 164 return found ? 0 : -ENODEV;
@@ -357,25 +357,52 @@ err1:
357 return NULL; 357 return NULL;
358} 358}
359 359
360
361static void
362__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
363 unsigned int order)
364{
365 struct netlink_sock *nlk = nlk_sk(sk);
366 struct sk_buff_head *queue;
367 struct netlink_ring *ring;
368
369 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371
372 spin_lock_bh(&queue->lock);
373
374 ring->frame_max = req->nm_frame_nr - 1;
375 ring->head = 0;
376 ring->frame_size = req->nm_frame_size;
377 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
378
379 swap(ring->pg_vec_len, req->nm_block_nr);
380 swap(ring->pg_vec_order, order);
381 swap(ring->pg_vec, pg_vec);
382
383 __skb_queue_purge(queue);
384 spin_unlock_bh(&queue->lock);
385
386 WARN_ON(atomic_read(&nlk->mapped));
387
388 if (pg_vec)
389 free_pg_vec(pg_vec, order, req->nm_block_nr);
390}
391
360static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, 392static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
361 bool closing, bool tx_ring) 393 bool tx_ring)
362{ 394{
363 struct netlink_sock *nlk = nlk_sk(sk); 395 struct netlink_sock *nlk = nlk_sk(sk);
364 struct netlink_ring *ring; 396 struct netlink_ring *ring;
365 struct sk_buff_head *queue;
366 void **pg_vec = NULL; 397 void **pg_vec = NULL;
367 unsigned int order = 0; 398 unsigned int order = 0;
368 int err;
369 399
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 400 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
372 401
373 if (!closing) { 402 if (atomic_read(&nlk->mapped))
374 if (atomic_read(&nlk->mapped)) 403 return -EBUSY;
375 return -EBUSY; 404 if (atomic_read(&ring->pending))
376 if (atomic_read(&ring->pending)) 405 return -EBUSY;
377 return -EBUSY;
378 }
379 406
380 if (req->nm_block_nr) { 407 if (req->nm_block_nr) {
381 if (ring->pg_vec != NULL) 408 if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
407 return -EINVAL; 434 return -EINVAL;
408 } 435 }
409 436
410 err = -EBUSY;
411 mutex_lock(&nlk->pg_vec_lock); 437 mutex_lock(&nlk->pg_vec_lock);
412 if (closing || atomic_read(&nlk->mapped) == 0) { 438 if (atomic_read(&nlk->mapped) == 0) {
413 err = 0; 439 __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
414 spin_lock_bh(&queue->lock); 440 mutex_unlock(&nlk->pg_vec_lock);
415 441 return 0;
416 ring->frame_max = req->nm_frame_nr - 1;
417 ring->head = 0;
418 ring->frame_size = req->nm_frame_size;
419 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
420
421 swap(ring->pg_vec_len, req->nm_block_nr);
422 swap(ring->pg_vec_order, order);
423 swap(ring->pg_vec, pg_vec);
424
425 __skb_queue_purge(queue);
426 spin_unlock_bh(&queue->lock);
427
428 WARN_ON(atomic_read(&nlk->mapped));
429 } 442 }
443
430 mutex_unlock(&nlk->pg_vec_lock); 444 mutex_unlock(&nlk->pg_vec_lock);
431 445
432 if (pg_vec) 446 if (pg_vec)
433 free_pg_vec(pg_vec, order, req->nm_block_nr); 447 free_pg_vec(pg_vec, order, req->nm_block_nr);
434 return err; 448
449 return -EBUSY;
435} 450}
436 451
437static void netlink_mm_open(struct vm_area_struct *vma) 452static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
900 915
901 memset(&req, 0, sizeof(req)); 916 memset(&req, 0, sizeof(req));
902 if (nlk->rx_ring.pg_vec) 917 if (nlk->rx_ring.pg_vec)
903 netlink_set_ring(sk, &req, true, false); 918 __netlink_set_ring(sk, &req, false, NULL, 0);
904 memset(&req, 0, sizeof(req)); 919 memset(&req, 0, sizeof(req));
905 if (nlk->tx_ring.pg_vec) 920 if (nlk->tx_ring.pg_vec)
906 netlink_set_ring(sk, &req, true, true); 921 __netlink_set_ring(sk, &req, true, NULL, 0);
907 } 922 }
908#endif /* CONFIG_NETLINK_MMAP */ 923#endif /* CONFIG_NETLINK_MMAP */
909 924
@@ -1081,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
1081 1096
1082 err = __netlink_insert(table, sk); 1097 err = __netlink_insert(table, sk);
1083 if (err) { 1098 if (err) {
1099 /* In case the hashtable backend returns with -EBUSY
1100 * from here, it must not escape to the caller.
1101 */
1102 if (unlikely(err == -EBUSY))
1103 err = -EOVERFLOW;
1084 if (err == -EEXIST) 1104 if (err == -EEXIST)
1085 err = -EADDRINUSE; 1105 err = -EADDRINUSE;
1086 nlk_sk(sk)->portid = 0; 1106 nlk_sk(sk)->portid = 0;
@@ -2223,7 +2243,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2223 return -EINVAL; 2243 return -EINVAL;
2224 if (copy_from_user(&req, optval, sizeof(req))) 2244 if (copy_from_user(&req, optval, sizeof(req)))
2225 return -EFAULT; 2245 return -EFAULT;
2226 err = netlink_set_ring(sk, &req, false, 2246 err = netlink_set_ring(sk, &req,
2227 optname == NETLINK_TX_RING); 2247 optname == NETLINK_TX_RING);
2228 break; 2248 break;
2229 } 2249 }
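Note: in af_netlink.c the ring setup is split in two — __netlink_set_ring() unconditionally installs (or, with a NULL page vector, tears down) a ring under the queue lock, while netlink_set_ring() keeps the -EBUSY checks for user requests; socket destruction now calls the unconditional helper directly. A stripped-down sketch of that split, with toy types in place of netlink_ring/nl_mmap_req:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ring { void **pg_vec; unsigned int frames; };
struct req  { unsigned int frames; };

/* Unconditional installer/teardown: pg_vec == NULL clears the ring. */
static void set_ring_locked(struct ring *ring, const struct req *req, void **pg_vec)
{
        void **old = ring->pg_vec;

        ring->pg_vec = pg_vec;
        ring->frames = pg_vec ? req->frames : 0;
        free(old);                      /* kernel frees the old page vector here */
}

/* User-visible path: refuse while the ring is mapped, otherwise install. */
static int set_ring(struct ring *ring, const struct req *req, bool mapped)
{
        void **pg_vec;

        if (mapped)
                return -1;              /* -EBUSY in the kernel */
        pg_vec = calloc(req->frames, sizeof(*pg_vec));
        if (!pg_vec)
                return -1;
        set_ring_locked(ring, req, pg_vec);
        return 0;
}

int main(void)
{
        struct ring ring = { NULL, 0 };
        struct req req = { 8 };

        set_ring(&ring, &req, false);
        set_ring_locked(&ring, &req, NULL);   /* teardown path, as in sock destruct */
        printf("frames after teardown: %u\n", ring.frames);
        return 0;
}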
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..ee34f474ad14 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
273 return 0; 273 return 0;
274} 274}
275 275
276static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, 276static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
277 __be32 *addr, __be32 new_addr) 277 __be32 addr, __be32 new_addr)
278{ 278{
279 int transport_len = skb->len - skb_transport_offset(skb); 279 int transport_len = skb->len - skb_transport_offset(skb);
280 280
281 if (nh->frag_off & htons(IP_OFFSET))
282 return;
283
281 if (nh->protocol == IPPROTO_TCP) { 284 if (nh->protocol == IPPROTO_TCP) {
282 if (likely(transport_len >= sizeof(struct tcphdr))) 285 if (likely(transport_len >= sizeof(struct tcphdr)))
283 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 286 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
284 *addr, new_addr, 1); 287 addr, new_addr, 1);
285 } else if (nh->protocol == IPPROTO_UDP) { 288 } else if (nh->protocol == IPPROTO_UDP) {
286 if (likely(transport_len >= sizeof(struct udphdr))) { 289 if (likely(transport_len >= sizeof(struct udphdr))) {
287 struct udphdr *uh = udp_hdr(skb); 290 struct udphdr *uh = udp_hdr(skb);
288 291
289 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 292 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
290 inet_proto_csum_replace4(&uh->check, skb, 293 inet_proto_csum_replace4(&uh->check, skb,
291 *addr, new_addr, 1); 294 addr, new_addr, 1);
292 if (!uh->check) 295 if (!uh->check)
293 uh->check = CSUM_MANGLED_0; 296 uh->check = CSUM_MANGLED_0;
294 } 297 }
295 } 298 }
296 } 299 }
300}
297 301
302static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
303 __be32 *addr, __be32 new_addr)
304{
305 update_ip_l4_checksum(skb, nh, *addr, new_addr);
298 csum_replace4(&nh->check, *addr, new_addr); 306 csum_replace4(&nh->check, *addr, new_addr);
299 skb_clear_hash(skb); 307 skb_clear_hash(skb);
300 *addr = new_addr; 308 *addr = new_addr;
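Note: the openvswitch change above factors the L4 checksum fixup into update_ip_l4_checksum() and skips it whenever the IP header carries a non-zero fragment offset, since only the first fragment holds the TCP/UDP header. A toy version of that guard (host byte order for brevity, whereas the kernel tests nh->frag_off & htons(IP_OFFSET)):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IP_OFFSET 0x1FFF        /* fragment-offset mask from the IP header */

/* Only a fragment at offset zero can contain the transport header, so the
 * L4 checksum is touched for that one alone.
 */
static bool may_fix_l4_csum(uint16_t frag_off)
{
        return (frag_off & IP_OFFSET) == 0;
}

int main(void)
{
        printf("first fragment (MF set, offset 0): %d\n", may_fix_l4_csum(0x2000));
        printf("later fragment (non-zero offset):  %d\n", may_fix_l4_csum(0x0010));
        return 0;
}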
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4613df8c8290..65523948fb95 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
753 753
754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
755 + (num_possible_nodes() 755 + (nr_node_ids
756 * sizeof(struct flow_stats *)), 756 * sizeof(struct flow_stats *)),
757 0, 0, NULL); 757 0, 0, NULL);
758 if (flow_cache == NULL) 758 if (flow_cache == NULL)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2403 } 2403 }
2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2405 addr, hlen); 2405 addr, hlen);
2406 if (tp_len > dev->mtu + dev->hard_header_len) { 2406 if (likely(tp_len >= 0) &&
2407 tp_len > dev->mtu + dev->hard_header_len) {
2407 struct ethhdr *ehdr; 2408 struct ethhdr *ehdr;
2408 /* Earlier code assumed this would be a VLAN pkt, 2409 /* Earlier code assumed this would be a VLAN pkt,
2409 * double-check this now that we have the actual 2410 * double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
2784static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) 2785static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2785{ 2786{
2786 struct packet_sock *po = pkt_sk(sk); 2787 struct packet_sock *po = pkt_sk(sk);
2787 const struct net_device *dev_curr; 2788 struct net_device *dev_curr;
2788 __be16 proto_curr; 2789 __be16 proto_curr;
2789 bool need_rehook; 2790 bool need_rehook;
2790 2791
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2808 2809
2809 po->num = proto; 2810 po->num = proto;
2810 po->prot_hook.type = proto; 2811 po->prot_hook.type = proto;
2811
2812 if (po->prot_hook.dev)
2813 dev_put(po->prot_hook.dev);
2814
2815 po->prot_hook.dev = dev; 2812 po->prot_hook.dev = dev;
2816 2813
2817 po->ifindex = dev ? dev->ifindex : 0; 2814 po->ifindex = dev ? dev->ifindex : 0;
2818 packet_cached_dev_assign(po, dev); 2815 packet_cached_dev_assign(po, dev);
2819 } 2816 }
2817 if (dev_curr)
2818 dev_put(dev_curr);
2820 2819
2821 if (proto == 0 || !need_rehook) 2820 if (proto == 0 || !need_rehook)
2822 goto out_unlock; 2821 goto out_unlock;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 273b8bff6ba4..657ba9f5d308 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
759 } 759 }
760 760
761 ibmr = rds_ib_alloc_fmr(rds_ibdev); 761 ibmr = rds_ib_alloc_fmr(rds_ibdev);
762 if (IS_ERR(ibmr)) 762 if (IS_ERR(ibmr)) {
763 rds_ib_dev_put(rds_ibdev);
763 return ibmr; 764 return ibmr;
765 }
764 766
765 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); 767 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
766 if (ret == 0) 768 if (ret == 0)
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f66187c..140a44a5f7b7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
176 176
177 /* check for all kinds of wrapping and the like */ 177 /* check for all kinds of wrapping and the like */
178 start = (unsigned long)optval; 178 start = (unsigned long)optval;
179 if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { 179 if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
180 ret = -EINVAL; 180 ret = -EINVAL;
181 goto out; 181 goto out;
182 } 182 }
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 8b4a6cd2c3a7..83498e1c75b8 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -73,7 +73,7 @@ EXPORT_SYMBOL_GPL(rds_trans_unregister);
73 73
74void rds_trans_put(struct rds_transport *trans) 74void rds_trans_put(struct rds_transport *trans)
75{ 75{
76 if (trans && trans->t_owner) 76 if (trans)
77 module_put(trans->t_owner); 77 module_put(trans->t_owner);
78} 78}
79 79
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
45} 45}
46EXPORT_SYMBOL(tcf_hash_destroy); 46EXPORT_SYMBOL(tcf_hash_destroy);
47 47
48int tcf_hash_release(struct tc_action *a, int bind) 48int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
49{ 49{
50 struct tcf_common *p = a->priv; 50 struct tcf_common *p = a->priv;
51 int ret = 0; 51 int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
53 if (p) { 53 if (p) {
54 if (bind) 54 if (bind)
55 p->tcfc_bindcnt--; 55 p->tcfc_bindcnt--;
56 else if (p->tcfc_bindcnt > 0) 56 else if (strict && p->tcfc_bindcnt > 0)
57 return -EPERM; 57 return -EPERM;
58 58
59 p->tcfc_refcnt--; 59 p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
64 ret = 1; 64 ret = 1;
65 } 65 }
66 } 66 }
67
67 return ret; 68 return ret;
68} 69}
69EXPORT_SYMBOL(tcf_hash_release); 70EXPORT_SYMBOL(__tcf_hash_release);
70 71
71static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, 72static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
72 struct tc_action *a) 73 struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
136 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; 137 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
137 hlist_for_each_entry_safe(p, n, head, tcfc_head) { 138 hlist_for_each_entry_safe(p, n, head, tcfc_head) {
138 a->priv = p; 139 a->priv = p;
139 ret = tcf_hash_release(a, 0); 140 ret = __tcf_hash_release(a, false, true);
140 if (ret == ACT_P_DELETED) { 141 if (ret == ACT_P_DELETED) {
141 module_put(a->ops->owner); 142 module_put(a->ops->owner);
142 n_i++; 143 n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
408 int ret = 0; 409 int ret = 0;
409 410
410 list_for_each_entry_safe(a, tmp, actions, list) { 411 list_for_each_entry_safe(a, tmp, actions, list) {
411 ret = tcf_hash_release(a, bind); 412 ret = __tcf_hash_release(a, bind, true);
412 if (ret == ACT_P_DELETED) 413 if (ret == ACT_P_DELETED)
413 module_put(a->ops->owner); 414 module_put(a->ops->owner);
414 else if (ret < 0) 415 else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d56903fd4c7..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
27struct tcf_bpf_cfg { 27struct tcf_bpf_cfg {
28 struct bpf_prog *filter; 28 struct bpf_prog *filter;
29 struct sock_filter *bpf_ops; 29 struct sock_filter *bpf_ops;
30 char *bpf_name; 30 const char *bpf_name;
31 u32 bpf_fd; 31 u32 bpf_fd;
32 u16 bpf_num_ops; 32 u16 bpf_num_ops;
33 bool is_ebpf;
33}; 34};
34 35
35static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, 36static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
207 cfg->bpf_ops = bpf_ops; 208 cfg->bpf_ops = bpf_ops;
208 cfg->bpf_num_ops = bpf_num_ops; 209 cfg->bpf_num_ops = bpf_num_ops;
209 cfg->filter = fp; 210 cfg->filter = fp;
211 cfg->is_ebpf = false;
210 212
211 return 0; 213 return 0;
212} 214}
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
241 cfg->bpf_fd = bpf_fd; 243 cfg->bpf_fd = bpf_fd;
242 cfg->bpf_name = name; 244 cfg->bpf_name = name;
243 cfg->filter = fp; 245 cfg->filter = fp;
246 cfg->is_ebpf = true;
244 247
245 return 0; 248 return 0;
246} 249}
247 250
251static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
252{
253 if (cfg->is_ebpf)
254 bpf_prog_put(cfg->filter);
255 else
256 bpf_prog_destroy(cfg->filter);
257
258 kfree(cfg->bpf_ops);
259 kfree(cfg->bpf_name);
260}
261
262static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
263 struct tcf_bpf_cfg *cfg)
264{
265 cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
266 cfg->filter = prog->filter;
267
268 cfg->bpf_ops = prog->bpf_ops;
269 cfg->bpf_name = prog->bpf_name;
270}
271
248static int tcf_bpf_init(struct net *net, struct nlattr *nla, 272static int tcf_bpf_init(struct net *net, struct nlattr *nla,
249 struct nlattr *est, struct tc_action *act, 273 struct nlattr *est, struct tc_action *act,
250 int replace, int bind) 274 int replace, int bind)
251{ 275{
252 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; 276 struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
277 struct tcf_bpf_cfg cfg, old;
253 struct tc_act_bpf *parm; 278 struct tc_act_bpf *parm;
254 struct tcf_bpf *prog; 279 struct tcf_bpf *prog;
255 struct tcf_bpf_cfg cfg;
256 bool is_bpf, is_ebpf; 280 bool is_bpf, is_ebpf;
257 int ret; 281 int ret;
258 282
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
301 prog = to_bpf(act); 325 prog = to_bpf(act);
302 spin_lock_bh(&prog->tcf_lock); 326 spin_lock_bh(&prog->tcf_lock);
303 327
328 if (ret != ACT_P_CREATED)
329 tcf_bpf_prog_fill_cfg(prog, &old);
330
304 prog->bpf_ops = cfg.bpf_ops; 331 prog->bpf_ops = cfg.bpf_ops;
305 prog->bpf_name = cfg.bpf_name; 332 prog->bpf_name = cfg.bpf_name;
306 333
@@ -316,29 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
316 343
317 if (ret == ACT_P_CREATED) 344 if (ret == ACT_P_CREATED)
318 tcf_hash_insert(act); 345 tcf_hash_insert(act);
346 else
347 tcf_bpf_cfg_cleanup(&old);
319 348
320 return ret; 349 return ret;
321 350
322destroy_fp: 351destroy_fp:
323 if (is_ebpf) 352 tcf_bpf_cfg_cleanup(&cfg);
324 bpf_prog_put(cfg.filter);
325 else
326 bpf_prog_destroy(cfg.filter);
327
328 kfree(cfg.bpf_ops);
329 kfree(cfg.bpf_name);
330
331 return ret; 353 return ret;
332} 354}
333 355
334static void tcf_bpf_cleanup(struct tc_action *act, int bind) 356static void tcf_bpf_cleanup(struct tc_action *act, int bind)
335{ 357{
336 const struct tcf_bpf *prog = act->priv; 358 struct tcf_bpf_cfg tmp;
337 359
338 if (tcf_bpf_is_ebpf(prog)) 360 tcf_bpf_prog_fill_cfg(act->priv, &tmp);
339 bpf_prog_put(prog->filter); 361 tcf_bpf_cfg_cleanup(&tmp);
340 else
341 bpf_prog_destroy(prog->filter);
342} 362}
343 363
344static struct tc_action_ops act_bpf_ops __read_mostly = { 364static struct tc_action_ops act_bpf_ops __read_mostly = {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b257226..268545050ddb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
98 return ret; 98 return ret;
99 ret = ACT_P_CREATED; 99 ret = ACT_P_CREATED;
100 } else { 100 } else {
101 if (bind)
102 return 0;
101 if (!ovr) { 103 if (!ovr) {
102 tcf_hash_release(a, bind); 104 tcf_hash_release(a, bind);
103 return -EEXIST; 105 return -EEXIST;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
68 } 68 }
69 ret = ACT_P_CREATED; 69 ret = ACT_P_CREATED;
70 } else { 70 } else {
71 p = to_pedit(a);
72 tcf_hash_release(a, bind);
73 if (bind) 71 if (bind)
74 return 0; 72 return 0;
73 tcf_hash_release(a, bind);
75 if (!ovr) 74 if (!ovr)
76 return -EEXIST; 75 return -EEXIST;
77 76 p = to_pedit(a);
78 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { 77 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
79 keys = kmalloc(ksize, GFP_KERNEL); 78 keys = kmalloc(ksize, GFP_KERNEL);
80 if (keys == NULL) 79 if (keys == NULL)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c79ecfd36e0f..e5168f8b9640 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
378 goto errout; 378 goto errout;
379 379
380 if (oldprog) { 380 if (oldprog) {
381 list_replace_rcu(&prog->link, &oldprog->link); 381 list_replace_rcu(&oldprog->link, &prog->link);
382 tcf_unbind_filter(tp, &oldprog->res); 382 tcf_unbind_filter(tp, &oldprog->res);
383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); 383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
384 } else { 384 } else {
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 76bc3a20ffdb..bb2a0f529c1f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
425 if (!fnew) 425 if (!fnew)
426 goto err2; 426 goto err2;
427 427
428 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
429
428 fold = (struct flow_filter *)*arg; 430 fold = (struct flow_filter *)*arg;
429 if (fold) { 431 if (fold) {
430 err = -EINVAL; 432 err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
486 fnew->mask = ~0U; 488 fnew->mask = ~0U;
487 fnew->tp = tp; 489 fnew->tp = tp;
488 get_random_bytes(&fnew->hashrnd, 4); 490 get_random_bytes(&fnew->hashrnd, 4);
489 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
490 } 491 }
491 492
492 fnew->perturb_timer.function = flow_perturbation; 493 fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
526 if (*arg == 0) 527 if (*arg == 0)
527 list_add_tail_rcu(&fnew->list, &head->filters); 528 list_add_tail_rcu(&fnew->list, &head->filters);
528 else 529 else
529 list_replace_rcu(&fnew->list, &fold->list); 530 list_replace_rcu(&fold->list, &fnew->list);
530 531
531 *arg = (unsigned long)fnew; 532 *arg = (unsigned long)fnew;
532 533
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9d37ccd95062..2f3d03f99487 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
499 *arg = (unsigned long) fnew; 499 *arg = (unsigned long) fnew;
500 500
501 if (fold) { 501 if (fold) {
502 list_replace_rcu(&fnew->list, &fold->list); 502 list_replace_rcu(&fold->list, &fnew->list);
503 tcf_unbind_filter(tp, &fold->res); 503 tcf_unbind_filter(tp, &fold->res);
504 call_rcu(&fold->rcu, fl_destroy_filter); 504 call_rcu(&fold->rcu, fl_destroy_filter);
505 } else { 505 } else {
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
385{ 385{
386 struct choke_sched_data *q = qdisc_priv(sch); 386 struct choke_sched_data *q = qdisc_priv(sch);
387 387
388 while (q->head != q->tail) {
389 struct sk_buff *skb = q->tab[q->head];
390
391 q->head = (q->head + 1) & q->tab_mask;
392 if (!skb)
393 continue;
394 qdisc_qstats_backlog_dec(sch, skb);
395 --sch->q.qlen;
396 qdisc_drop(skb, sch);
397 }
398
399 memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
400 q->head = q->tail = 0;
388 red_restart(&q->vars); 401 red_restart(&q->vars);
389} 402}
390 403
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d75993f89fac..a9ba030435a2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
155 skb = dequeue_head(flow); 155 skb = dequeue_head(flow);
156 len = qdisc_pkt_len(skb); 156 len = qdisc_pkt_len(skb);
157 q->backlogs[idx] -= len; 157 q->backlogs[idx] -= len;
158 kfree_skb(skb);
159 sch->q.qlen--; 158 sch->q.qlen--;
160 qdisc_qstats_drop(sch); 159 qdisc_qstats_drop(sch);
161 qdisc_qstats_backlog_dec(sch, skb); 160 qdisc_qstats_backlog_dec(sch, skb);
161 kfree_skb(skb);
162 flow->dropped++; 162 flow->dropped++;
163 return idx; 163 return idx;
164} 164}
165 165
166static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
167{
168 unsigned int prev_backlog;
169
170 prev_backlog = sch->qstats.backlog;
171 fq_codel_drop(sch);
172 return prev_backlog - sch->qstats.backlog;
173}
174
166static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) 175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
167{ 176{
168 struct fq_codel_sched_data *q = qdisc_priv(sch); 177 struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -279,10 +288,26 @@ begin:
279 288
280static void fq_codel_reset(struct Qdisc *sch) 289static void fq_codel_reset(struct Qdisc *sch)
281{ 290{
282 struct sk_buff *skb; 291 struct fq_codel_sched_data *q = qdisc_priv(sch);
292 int i;
283 293
284 while ((skb = fq_codel_dequeue(sch)) != NULL) 294 INIT_LIST_HEAD(&q->new_flows);
285 kfree_skb(skb); 295 INIT_LIST_HEAD(&q->old_flows);
296 for (i = 0; i < q->flows_cnt; i++) {
297 struct fq_codel_flow *flow = q->flows + i;
298
299 while (flow->head) {
300 struct sk_buff *skb = dequeue_head(flow);
301
302 qdisc_qstats_backlog_dec(sch, skb);
303 kfree_skb(skb);
304 }
305
306 INIT_LIST_HEAD(&flow->flowchain);
307 codel_vars_init(&flow->cvars);
308 }
309 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
310 sch->q.qlen = 0;
286} 311}
287 312
288static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { 313static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
@@ -604,7 +629,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
604 .enqueue = fq_codel_enqueue, 629 .enqueue = fq_codel_enqueue,
605 .dequeue = fq_codel_dequeue, 630 .dequeue = fq_codel_dequeue,
606 .peek = qdisc_peek_dequeued, 631 .peek = qdisc_peek_dequeued,
607 .drop = fq_codel_drop, 632 .drop = fq_codel_qdisc_drop,
608 .init = fq_codel_init, 633 .init = fq_codel_init,
609 .reset = fq_codel_reset, 634 .reset = fq_codel_reset,
610 .destroy = fq_codel_destroy, 635 .destroy = fq_codel_destroy,
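Note: for sch_fq_codel.c, the qdisc's ->drop hook is now a wrapper that reports how many bytes actually left the backlog, and reset walks every flow freeing queued packets directly instead of repeatedly dequeuing through the CoDel logic. A sketch of the backlog-delta wrapper, with a toy qdisc struct standing in for struct Qdisc:

#include <stdio.h>

struct qdisc { unsigned int backlog; };

/* Stand-in for fq_codel_drop(): removes one queued packet's bytes. */
static void drop_one(struct qdisc *q)
{
        if (q->backlog >= 1500)
                q->backlog -= 1500;
        else
                q->backlog = 0;
}

/* The ->drop() callback must return the number of bytes freed, so sample
 * the backlog around the real drop, as fq_codel_qdisc_drop() now does.
 */
static unsigned int qdisc_drop_bytes(struct qdisc *q)
{
        unsigned int prev = q->backlog;

        drop_one(q);
        return prev - q->backlog;
}

int main(void)
{
        struct qdisc q = { 3200 };

        printf("freed %u bytes\n", qdisc_drop_bytes(&q));
        printf("freed %u bytes\n", qdisc_drop_bytes(&q));
        return 0;
}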
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
216 .peek = qdisc_peek_head, 216 .peek = qdisc_peek_head,
217 .init = plug_init, 217 .init = plug_init,
218 .change = plug_change, 218 .change = plug_change,
219 .reset = qdisc_reset_queue,
219 .owner = THIS_MODULE, 220 .owner = THIS_MODULE,
220}; 221};
221 222
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7d1492663360..52f75a5473e1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -306,10 +306,10 @@ drop:
306 len = qdisc_pkt_len(skb); 306 len = qdisc_pkt_len(skb);
307 slot->backlog -= len; 307 slot->backlog -= len;
308 sfq_dec(q, x); 308 sfq_dec(q, x);
309 kfree_skb(skb);
310 sch->q.qlen--; 309 sch->q.qlen--;
311 qdisc_qstats_drop(sch); 310 qdisc_qstats_drop(sch);
312 qdisc_qstats_backlog_dec(sch, skb); 311 qdisc_qstats_backlog_dec(sch, skb);
312 kfree_skb(skb);
313 return len; 313 return len;
314 } 314 }
315 315
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2201 return -EFAULT; 2201 return -EFAULT;
2202 2202
2203 if (sctp_sk(sk)->subscribe.sctp_data_io_event)
2204 pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
2205 "Requested SCTP_SNDRCVINFO event.\n"
2206 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
2207 current->comm, task_pid_nr(current));
2208
2209 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2203 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2210 * if there is no data to be sent or retransmit, the stack will 2204 * if there is no data to be sent or retransmit, the stack will
2211 * immediately send up this notification. 2205 * immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); 240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
241 if (!req) 241 if (!req)
242 goto not_found; 242 goto not_found;
243 /* Note: this 'free' request adds it to xprt->bc_pa_list */ 243 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
244 xprt_free_bc_request(req); 244 xprt->bc_alloc_count++;
245 } 245 }
246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, 246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
247 rq_bc_pa_list); 247 rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
336 336
337 spin_lock(&xprt->bc_pa_lock); 337 spin_lock(&xprt->bc_pa_lock);
338 list_del(&req->rq_bc_pa_list); 338 list_del(&req->rq_bc_pa_list);
339 xprt->bc_alloc_count--; 339 xprt_dec_alloc_count(xprt, 1);
340 spin_unlock(&xprt->bc_pa_lock); 340 spin_unlock(&xprt->bc_pa_lock);
341 341
342 req->rq_private_buf.len = copied; 342 req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
1902 1902
1903 switch (task->tk_status) { 1903 switch (task->tk_status) {
1904 case -EAGAIN: 1904 case -EAGAIN:
1905 case -ENOBUFS:
1905 break; 1906 break;
1906 default: 1907 default:
1907 dprint_status(task); 1908 dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
1928 case -ECONNABORTED: 1929 case -ECONNABORTED:
1929 case -EADDRINUSE: 1930 case -EADDRINUSE:
1930 case -ENOTCONN: 1931 case -ENOTCONN:
1931 case -ENOBUFS:
1932 case -EPIPE: 1932 case -EPIPE:
1933 rpc_task_force_reencode(task); 1933 rpc_task_force_reencode(task);
1934 } 1934 }
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
2057 case -ECONNABORTED: 2057 case -ECONNABORTED:
2058 rpc_force_rebind(clnt); 2058 rpc_force_rebind(clnt);
2059 case -EADDRINUSE: 2059 case -EADDRINUSE:
2060 case -ENOBUFS:
2061 rpc_delay(task, 3*HZ); 2060 rpc_delay(task, 3*HZ);
2062 case -EPIPE: 2061 case -EPIPE:
2063 case -ENOTCONN: 2062 case -ENOTCONN:
2064 task->tk_action = call_bind; 2063 task->tk_action = call_bind;
2065 break; 2064 break;
2065 case -ENOBUFS:
2066 rpc_delay(task, HZ>>2);
2066 case -EAGAIN: 2067 case -EAGAIN:
2067 task->tk_action = call_transmit; 2068 task->tk_action = call_transmit;
2068 break; 2069 break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
527 true, &sent); 527 true, &sent);
528 dprintk("RPC: %s(%u) = %d\n", 528 dprintk("RPC: %s(%u) = %d\n",
529 __func__, xdr->len - req->rq_bytes_sent, status); 529 __func__, xdr->len - req->rq_bytes_sent, status);
530
531 if (status == -EAGAIN && sock_writeable(transport->inet))
532 status = -ENOBUFS;
533
530 if (likely(sent > 0) || status == 0) { 534 if (likely(sent > 0) || status == 0) {
531 req->rq_bytes_sent += sent; 535 req->rq_bytes_sent += sent;
532 req->rq_xmit_bytes_sent += sent; 536 req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
539 543
540 switch (status) { 544 switch (status) {
541 case -ENOBUFS: 545 case -ENOBUFS:
546 break;
542 case -EAGAIN: 547 case -EAGAIN:
543 status = xs_nospace(task); 548 status = xs_nospace(task);
544 break; 549 break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
589 if (status == -EPERM) 594 if (status == -EPERM)
590 goto process_status; 595 goto process_status;
591 596
597 if (status == -EAGAIN && sock_writeable(transport->inet))
598 status = -ENOBUFS;
599
592 if (sent > 0 || status == 0) { 600 if (sent > 0 || status == 0) {
593 req->rq_xmit_bytes_sent += sent; 601 req->rq_xmit_bytes_sent += sent;
594 if (sent >= req->rq_slen) 602 if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
669 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 677 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
670 xdr->len - req->rq_bytes_sent, status); 678 xdr->len - req->rq_bytes_sent, status);
671 679
672 if (unlikely(sent == 0 && status < 0))
673 break;
674
675 /* If we've sent the entire packet, immediately 680 /* If we've sent the entire packet, immediately
676 * reset the count of bytes sent. */ 681 * reset the count of bytes sent. */
677 req->rq_bytes_sent += sent; 682 req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
681 return 0; 686 return 0;
682 } 687 }
683 688
684 if (sent != 0) 689 if (status < 0)
685 continue; 690 break;
686 status = -EAGAIN; 691 if (sent == 0) {
687 break; 692 status = -EAGAIN;
693 break;
694 }
688 } 695 }
696 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
697 status = -ENOBUFS;
689 698
690 switch (status) { 699 switch (status) {
691 case -ENOTSOCK: 700 case -ENOTSOCK:
692 status = -ENOTCONN; 701 status = -ENOTCONN;
693 /* Should we call xs_close() here? */ 702 /* Should we call xs_close() here? */
694 break; 703 break;
695 case -ENOBUFS:
696 case -EAGAIN: 704 case -EAGAIN:
697 status = xs_nospace(task); 705 status = xs_nospace(task);
698 break; 706 break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
703 case -ECONNREFUSED: 711 case -ECONNREFUSED:
704 case -ENOTCONN: 712 case -ENOTCONN:
705 case -EADDRINUSE: 713 case -EADDRINUSE:
714 case -ENOBUFS:
706 case -EPIPE: 715 case -EPIPE:
707 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 716 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
708 } 717 }
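Note: the xprtsock.c hunks map a send that returned EAGAIN while the socket is still writeable to ENOBUFS — the send buffer is not full, so the failure is presumably a transient allocation shortage, and the RPC layer backs off briefly and retransmits rather than waiting for write space. A minimal model of that reinterpretation, with sock_writeable()/sk_stream_is_writeable() reduced to a boolean:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* If the kernel said "try again" but there is room in the send buffer,
 * report ENOBUFS instead and let the caller schedule a short delay before
 * retransmitting.
 */
static int classify_send_error(int status, bool socket_writeable)
{
        if (status == -EAGAIN && socket_writeable)
                return -ENOBUFS;
        return status;
}

int main(void)
{
        printf("%d\n", classify_send_error(-EAGAIN, true));   /* -> -ENOBUFS */
        printf("%d\n", classify_send_error(-EAGAIN, false));  /* stays -EAGAIN */
        return 0;
}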
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 84f77a054025..9f2add3cba26 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -171,8 +171,10 @@ int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
171 * released. 171 * released.
172 */ 172 */
173 173
174 attr->trans = SWITCHDEV_TRANS_ABORT; 174 if (err != -EOPNOTSUPP) {
175 __switchdev_port_attr_set(dev, attr); 175 attr->trans = SWITCHDEV_TRANS_ABORT;
176 __switchdev_port_attr_set(dev, attr);
177 }
176 178
177 return err; 179 return err;
178 } 180 }
@@ -249,8 +251,10 @@ int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
249 * released. 251 * released.
250 */ 252 */
251 253
252 obj->trans = SWITCHDEV_TRANS_ABORT; 254 if (err != -EOPNOTSUPP) {
253 __switchdev_port_obj_add(dev, obj); 255 obj->trans = SWITCHDEV_TRANS_ABORT;
256 __switchdev_port_obj_add(dev, obj);
257 }
254 258
255 return err; 259 return err;
256 } 260 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 46b6ed534ef2..3a7567f690f3 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2007,6 +2007,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
2007 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1); 2007 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
2008 if (res) 2008 if (res)
2009 goto exit; 2009 goto exit;
2010 security_sk_clone(sock->sk, new_sock->sk);
2010 2011
2011 new_sk = new_sock->sk; 2012 new_sk = new_sock->sk;
2012 new_tsock = tipc_sk(new_sk); 2013 new_tsock = tipc_sk(new_sk);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 915b328b9ac5..59cabc9bce69 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
797 return false; 797 return false;
798} 798}
799 799
800bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 800static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
801 struct cfg80211_chan_def *chandef, 801 struct cfg80211_chan_def *chandef,
802 enum nl80211_iftype iftype) 802 enum nl80211_iftype iftype,
803 bool check_no_ir)
803{ 804{
804 bool res; 805 bool res;
805 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 806 u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
806 IEEE80211_CHAN_RADAR; 807 IEEE80211_CHAN_RADAR;
807 808
808 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); 809 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
809 810
810 /* 811 if (check_no_ir)
811 * Under certain conditions suggested by some regulatory bodies a
812 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
813 * only if such relaxations are not enabled and the conditions are not
814 * met.
815 */
816 if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
817 prohibited_flags |= IEEE80211_CHAN_NO_IR; 812 prohibited_flags |= IEEE80211_CHAN_NO_IR;
818 813
819 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && 814 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
827 trace_cfg80211_return_bool(res); 822 trace_cfg80211_return_bool(res);
828 return res; 823 return res;
829} 824}
825
826bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
827 struct cfg80211_chan_def *chandef,
828 enum nl80211_iftype iftype)
829{
830 return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
831}
830EXPORT_SYMBOL(cfg80211_reg_can_beacon); 832EXPORT_SYMBOL(cfg80211_reg_can_beacon);
831 833
834bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
835 struct cfg80211_chan_def *chandef,
836 enum nl80211_iftype iftype)
837{
838 bool check_no_ir;
839
840 ASSERT_RTNL();
841
842 /*
843 * Under certain conditions suggested by some regulatory bodies a
844 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
845 * only if such relaxations are not enabled and the conditions are not
846 * met.
847 */
848 check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
849 chandef->chan);
850
851 return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
852}
853EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
854
832int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 855int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
833 struct cfg80211_chan_def *chandef) 856 struct cfg80211_chan_def *chandef)
834{ 857{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c264effd00a6..76b41578a838 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
2003 switch (iftype) { 2003 switch (iftype) {
2004 case NL80211_IFTYPE_AP: 2004 case NL80211_IFTYPE_AP:
2005 case NL80211_IFTYPE_P2P_GO: 2005 case NL80211_IFTYPE_P2P_GO:
2006 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { 2006 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
2007 iftype)) {
2007 result = -EINVAL; 2008 result = -EINVAL;
2008 break; 2009 break;
2009 } 2010 }
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
3403 } else if (!nl80211_get_ap_channel(rdev, &params)) 3404 } else if (!nl80211_get_ap_channel(rdev, &params))
3404 return -EINVAL; 3405 return -EINVAL;
3405 3406
3406 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 3407 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
3407 wdev->iftype)) 3408 wdev->iftype))
3408 return -EINVAL; 3409 return -EINVAL;
3409 3410
3410 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { 3411 if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ skip_beacons:
6492 if (err) 6493 if (err)
6493 return err; 6494 return err;
6494 6495
6495 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 6496 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
6496 wdev->iftype)) 6497 wdev->iftype))
6497 return -EINVAL; 6498 return -EINVAL;
6498 6499
6499 err = cfg80211_chandef_dfs_required(wdev->wiphy, 6500 err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
10170 return -EINVAL; 10171 return -EINVAL;
10171 10172
10172 /* we will be active on the TDLS link */ 10173 /* we will be active on the TDLS link */
10173 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) 10174 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
10175 wdev->iftype))
10174 return -EINVAL; 10176 return -EINVAL;
10175 10177
10176 /* don't allow switching to DFS channels */ 10178 /* don't allow switching to DFS channels */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d359e0610198..aa2d75482017 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
544 reg_regdb_query(alpha2); 544 reg_regdb_query(alpha2);
545 545
546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { 546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
547 pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); 547 pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
548 return -EINVAL; 548 return -EINVAL;
549 } 549 }
550 550
551 if (!is_world_regdom((char *) alpha2)) 551 if (!is_world_regdom((char *) alpha2))
552 pr_info("Calling CRDA for country: %c%c\n", 552 pr_debug("Calling CRDA for country: %c%c\n",
553 alpha2[0], alpha2[1]); 553 alpha2[0], alpha2[1]);
554 else 554 else
555 pr_info("Calling CRDA to update world regulatory domain\n"); 555 pr_debug("Calling CRDA to update world regulatory domain\n");
556 556
557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); 557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
558} 558}
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
1589 case NL80211_IFTYPE_AP: 1589 case NL80211_IFTYPE_AP:
1590 case NL80211_IFTYPE_P2P_GO: 1590 case NL80211_IFTYPE_P2P_GO:
1591 case NL80211_IFTYPE_ADHOC: 1591 case NL80211_IFTYPE_ADHOC:
1592 return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); 1592 return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
1593 case NL80211_IFTYPE_STATION: 1593 case NL80211_IFTYPE_STATION:
1594 case NL80211_IFTYPE_P2P_CLIENT: 1594 case NL80211_IFTYPE_P2P_CLIENT:
1595 return cfg80211_chandef_usable(wiphy, &chandef, 1595 return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index af3617c9879e..a808279a432a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
2358 2358
2359TRACE_EVENT(cfg80211_reg_can_beacon, 2359TRACE_EVENT(cfg80211_reg_can_beacon,
2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, 2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
2361 enum nl80211_iftype iftype), 2361 enum nl80211_iftype iftype, bool check_no_ir),
2362 TP_ARGS(wiphy, chandef, iftype), 2362 TP_ARGS(wiphy, chandef, iftype, check_no_ir),
2363 TP_STRUCT__entry( 2363 TP_STRUCT__entry(
2364 WIPHY_ENTRY 2364 WIPHY_ENTRY
2365 CHAN_DEF_ENTRY 2365 CHAN_DEF_ENTRY
2366 __field(enum nl80211_iftype, iftype) 2366 __field(enum nl80211_iftype, iftype)
2367 __field(bool, check_no_ir)
2367 ), 2368 ),
2368 TP_fast_assign( 2369 TP_fast_assign(
2369 WIPHY_ASSIGN; 2370 WIPHY_ASSIGN;
2370 CHAN_DEF_ASSIGN(chandef); 2371 CHAN_DEF_ASSIGN(chandef);
2371 __entry->iftype = iftype; 2372 __entry->iftype = iftype;
2373 __entry->check_no_ir = check_no_ir;
2372 ), 2374 ),
2373 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", 2375 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
2374 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) 2376 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
2377 BOOL_TO_STR(__entry->check_no_ir))
2375); 2378);
2376 2379
2377TRACE_EVENT(cfg80211_chandef_dfs_required, 2380TRACE_EVENT(cfg80211_chandef_dfs_required,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 8965d1bb8811..125d6402f64f 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -168,7 +168,10 @@
168 * 168 *
169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) 169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
170 * Use __get_dynamic_array_len(foo) to get the length of the array 170 * Use __get_dynamic_array_len(foo) to get the length of the array
171 * saved. 171 * saved. Note, __get_dynamic_array_len() returns the total allocated
172 * length of the dynamic array; __print_array() expects the second
173 * parameter to be the number of elements. To get that, the array length
174 * needs to be divided by the element size.
172 * 175 *
173 * For __string(foo, bar) use __get_str(foo) 176 * For __string(foo, bar) use __get_str(foo)
174 * 177 *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
288 * This prints out the array that is defined by __array in a nice format. 291 * This prints out the array that is defined by __array in a nice format.
289 */ 292 */
290 __print_array(__get_dynamic_array(list), 293 __print_array(__get_dynamic_array(list),
291 __get_dynamic_array_len(list), 294 __get_dynamic_array_len(list) / sizeof(int),
292 sizeof(int)), 295 sizeof(int)),
293 __get_str(str), __get_bitmask(cpus)) 296 __get_str(str), __get_bitmask(cpus))
294); 297);
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 90e1edc8dd42..d5c8e9a3a73c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2599,7 +2599,7 @@ sub process {
2599# if LONG_LINE is ignored, the other 2 types are also ignored 2599# if LONG_LINE is ignored, the other 2 types are also ignored
2600# 2600#
2601 2601
2602 if ($length > $max_line_length) { 2602 if ($line =~ /^\+/ && $length > $max_line_length) {
2603 my $msg_type = "LONG_LINE"; 2603 my $msg_type = "LONG_LINE";
2604 2604
2605 # Check the allowed long line types first 2605 # Check the allowed long line types first
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
137my $kconfig = $ARGV[1]; 137my $kconfig = $ARGV[1];
138my $lsmod_file = $ENV{'LSMOD'}; 138my $lsmod_file = $ENV{'LSMOD'};
139 139
140my @makefiles = `find $ksource -name Makefile 2>/dev/null`; 140my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
141chomp @makefiles; 141chomp @makefiles;
142 142
143my %depends; 143my %depends;
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index eff7de1fc82e..e70fcd12eeeb 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -63,6 +63,8 @@ int main(void)
63 63
64 DEVID(acpi_device_id); 64 DEVID(acpi_device_id);
65 DEVID_FIELD(acpi_device_id, id); 65 DEVID_FIELD(acpi_device_id, id);
66 DEVID_FIELD(acpi_device_id, cls);
67 DEVID_FIELD(acpi_device_id, cls_msk);
66 68
67 DEVID(pnp_device_id); 69 DEVID(pnp_device_id);
68 DEVID_FIELD(pnp_device_id, id); 70 DEVID_FIELD(pnp_device_id, id);
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 84c86f3cd6cd..5f2088209132 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -523,12 +523,40 @@ static int do_serio_entry(const char *filename,
523} 523}
524ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry); 524ADD_TO_DEVTABLE("serio", serio_device_id, do_serio_entry);
525 525
526/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */ 526/* looks like: "acpi:ACPI0003" or "acpi:PNP0C0B" or "acpi:LNXVIDEO" or
527 * "acpi:bbsspp" (bb=base-class, ss=sub-class, pp=prog-if)
528 *
529 * NOTE: Each driver should use one of the following: _HID, _CIDs
530 * or _CLS. Also, bb, ss, and pp can be substituted with ??
531 * as a don't-care byte.
532 */
527static int do_acpi_entry(const char *filename, 533static int do_acpi_entry(const char *filename,
528 void *symval, char *alias) 534 void *symval, char *alias)
529{ 535{
530 DEF_FIELD_ADDR(symval, acpi_device_id, id); 536 DEF_FIELD_ADDR(symval, acpi_device_id, id);
531 sprintf(alias, "acpi*:%s:*", *id); 537 DEF_FIELD_ADDR(symval, acpi_device_id, cls);
538 DEF_FIELD_ADDR(symval, acpi_device_id, cls_msk);
539
540 if (id && strlen((const char *)*id))
541 sprintf(alias, "acpi*:%s:*", *id);
542 else if (cls) {
543 int i, byte_shift, cnt = 0;
544 unsigned int msk;
545
546 sprintf(&alias[cnt], "acpi*:");
547 cnt = 6;
548 for (i = 1; i <= 3; i++) {
549 byte_shift = 8 * (3-i);
550 msk = (*cls_msk >> byte_shift) & 0xFF;
551 if (msk)
552 sprintf(&alias[cnt], "%02x",
553 (*cls >> byte_shift) & 0xFF);
554 else
555 sprintf(&alias[cnt], "??");
556 cnt += 2;
557 }
558 sprintf(&alias[cnt], ":*");
559 }
532 return 1; 560 return 1;
533} 561}
534ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry); 562ADD_TO_DEVTABLE("acpi", acpi_device_id, do_acpi_entry);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 91ee1b2e0f9a..12d3db3bd46b 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -886,7 +886,8 @@ static void check_section(const char *modname, struct elf_info *elf,
886#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \ 886#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
887 ".kprobes.text" 887 ".kprobes.text"
888#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ 888#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
889 ".fixup", ".entry.text", ".exception.text", ".text.*" 889 ".fixup", ".entry.text", ".exception.text", ".text.*", \
890 ".coldtext"
890 891
891#define INIT_SECTIONS ".init.*" 892#define INIT_SECTIONS ".init.*"
892#define MEM_INIT_SECTIONS ".meminit.*" 893#define MEM_INIT_SECTIONS ".meminit.*"
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e72548b5897e..d33437007ad2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
1181 if (index_key->type == &key_type_keyring) 1181 if (index_key->type == &key_type_keyring)
1182 up_write(&keyring_serialise_link_sem); 1182 up_write(&keyring_serialise_link_sem);
1183 1183
1184 if (edit && !edit->dead_leaf) { 1184 if (edit) {
1185 key_payload_reserve(keyring, 1185 if (!edit->dead_leaf) {
1186 keyring->datalen - KEYQUOTA_LINK_BYTES); 1186 key_payload_reserve(keyring,
1187 keyring->datalen - KEYQUOTA_LINK_BYTES);
1188 }
1187 assoc_array_cancel_edit(edit); 1189 assoc_array_cancel_edit(edit);
1188 } 1190 }
1189 up_write(&keyring->sem); 1191 up_write(&keyring->sem);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 623108199641..564079c5c49d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3283,7 +3283,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
3283 int rc = 0; 3283 int rc = 0;
3284 3284
3285 if (default_noexec && 3285 if (default_noexec &&
3286 (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { 3286 (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
3287 (!shared && (prot & PROT_WRITE)))) {
3287 /* 3288 /*
3288 * We are making executable an anonymous mapping or a 3289 * We are making executable an anonymous mapping or a
3289 * private file mapping that will also be writable. 3290 * private file mapping that will also be writable.
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index afe6a269ec17..57644b1dc42e 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
153 if (offset == (u32)-1) 153 if (offset == (u32)-1)
154 return 0; 154 return 0;
155 155
156 /* don't waste ebitmap space if the netlabel bitmap is empty */
157 if (bitmap == 0) {
158 offset += EBITMAP_UNIT_SIZE;
159 continue;
160 }
161
156 if (e_iter == NULL || 162 if (e_iter == NULL ||
157 offset >= e_iter->startbit + EBITMAP_SIZE) { 163 offset >= e_iter->startbit + EBITMAP_SIZE) {
158 e_prev = e_iter; 164 e_prev = e_iter;
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
406 */ 406 */
407 if (!security_module_enable("yama")) 407 if (!security_module_enable("yama"))
408 return 0; 408 return 0;
409 yama_add_hooks();
409#endif 410#endif
410 pr_info("Yama: becoming mindful.\n"); 411 pr_info("Yama: becoming mindful.\n");
411 412
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index 2b50cbe6aca9..55791a0b3943 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -18,6 +18,68 @@
18#include <sound/ac97_codec.h> 18#include <sound/ac97_codec.h>
19 19
20/* 20/*
21 * snd_ac97_check_id() - Reads and checks the vendor ID of the device
22 * @ac97: The AC97 device to check
23 * @id: The ID to compare to
24 * @id_mask: Mask that is applied to the device ID before comparing to @id
25 *
26 * If @id is 0 this function returns true if the read device vendor ID is
27 * a valid ID. If @id is non-zero this function returns true if @id
28 * matches the read vendor ID. Otherwise the function returns false.
29 */
30static bool snd_ac97_check_id(struct snd_ac97 *ac97, unsigned int id,
31 unsigned int id_mask)
32{
33 ac97->id = ac97->bus->ops->read(ac97, AC97_VENDOR_ID1) << 16;
34 ac97->id |= ac97->bus->ops->read(ac97, AC97_VENDOR_ID2);
35
36 if (ac97->id == 0x0 || ac97->id == 0xffffffff)
37 return false;
38
39 if (id != 0 && id != (ac97->id & id_mask))
40 return false;
41
42 return true;
43}
44
45/**
46 * snd_ac97_reset() - Reset AC'97 device
47 * @ac97: The AC'97 device to reset
48 * @try_warm: Try a warm reset first
49 * @id: Expected device vendor ID
50 * @id_mask: Mask that is applied to the device ID before comparing to @id
51 *
52 * This function resets the AC'97 device. If @try_warm is true the function
53 * first performs a warm reset. If the warm reset is successful the function
54 * returns 1. Otherwise, or if @try_warm is false, the function issues a cold reset
55 * followed by a warm reset. If this is successful the function returns 0,
56 * otherwise a negative error code. If @id is 0 any valid device ID will be
57 * accepted, otherwise only the ID that matches @id and @id_mask is accepted.
58 */
59int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
60 unsigned int id_mask)
61{
62 struct snd_ac97_bus_ops *ops = ac97->bus->ops;
63
64 if (try_warm && ops->warm_reset) {
65 ops->warm_reset(ac97);
66 if (snd_ac97_check_id(ac97, id, id_mask))
67 return 1;
68 }
69
70 if (ops->reset)
71 ops->reset(ac97);
72 if (ops->warm_reset)
73 ops->warm_reset(ac97);
74
75 if (snd_ac97_check_id(ac97, id, id_mask))
76 return 0;
77
78 return -ENODEV;
79}
80EXPORT_SYMBOL_GPL(snd_ac97_reset);
81
82/*
21 * Let drivers decide whether they want to support given codec from their 83 * Let drivers decide whether they want to support given codec from their
22 * probe method. Drivers have direct access to the struct snd_ac97 84 * probe method. Drivers have direct access to the struct snd_ac97
23 * structure and may decide based on the id field amongst other things. 85 * structure and may decide based on the id field amongst other things.
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index d126c03361ae..75888dd38a7f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
85void snd_pcm_stream_lock(struct snd_pcm_substream *substream) 85void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
86{ 86{
87 if (substream->pcm->nonatomic) { 87 if (substream->pcm->nonatomic) {
88 down_read(&snd_pcm_link_rwsem); 88 down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
89 mutex_lock(&substream->self_group.mutex); 89 mutex_lock(&substream->self_group.mutex);
90 } else { 90 } else {
91 read_lock(&snd_pcm_link_rwlock); 91 read_lock(&snd_pcm_link_rwlock);
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
740 s->data_block_counter != UINT_MAX) 740 s->data_block_counter != UINT_MAX)
741 data_block_counter = s->data_block_counter; 741 data_block_counter = s->data_block_counter;
742 742
743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) || 743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
744 (s->data_block_counter == UINT_MAX)) { 744 data_block_counter == s->tx_first_dbc) ||
745 s->data_block_counter == UINT_MAX) {
745 lost = false; 746 lost = false;
746 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { 747 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
747 lost = data_block_counter != s->data_block_counter; 748 lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
157 157
158 /* quirk: fixed interval of dbc between previous/current packets. */ 158 /* quirk: fixed interval of dbc between previous/current packets. */
159 unsigned int tx_dbc_interval; 159 unsigned int tx_dbc_interval;
160 /* quirk: indicates the value of the dbc field in the first packet. */
161 unsigned int tx_first_dbc;
160 162
161 bool callbacked; 163 bool callbacked;
162 wait_queue_head_t callback_wait; 164 wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 2682e7e3e5c9..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
248 err = get_hardware_info(efw); 248 err = get_hardware_info(efw);
249 if (err < 0) 249 if (err < 0)
250 goto error; 250 goto error;
251 /* AudioFire8 (since 2009) and AudioFirePre8 */
251 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) 252 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
252 efw->is_af9 = true; 253 efw->is_af9 = true;
254 /* These models use the same firmware. */
255 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
256 entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
257 entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
258 entry->model_id == MODEL_GIBSON_RIP ||
259 entry->model_id == MODEL_GIBSON_GOLDTOP)
260 efw->is_fireworks3 = true;
253 261
254 snd_efw_proc_init(efw); 262 snd_efw_proc_init(efw);
255 263
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4f0201a95222..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -71,6 +71,7 @@ struct snd_efw {
71 71
72 /* for quirks */ 72 /* for quirks */
73 bool is_af9; 73 bool is_af9;
74 bool is_fireworks3;
74 u32 firmware_version; 75 u32 firmware_version;
75 76
76 unsigned int midi_in_ports; 77 unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index c55db1bddc80..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; 172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
173 /* Fireworks reset dbc at bus reset. */ 173 /* Fireworks reset dbc at bus reset. */
174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; 174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
175 /*
176 * But recent firmware starts packets with a non-zero dbc.
177 * Driver version 5.7.6 installs firmware version 5.7.3.
178 */
179 if (efw->is_fireworks3 &&
180 (efw->firmware_version == 0x5070000 ||
181 efw->firmware_version == 0x5070300 ||
182 efw->firmware_version == 0x5080000))
183 efw->tx_stream.tx_first_dbc = 0x02;
175 /* AudioFire9 always reports wrong dbs. */ 184 /* AudioFire9 always reports wrong dbs. */
176 if (efw->is_af9) 185 if (efw->is_af9)
177 efw->tx_stream.flags |= CIP_WRONG_DBS; 186 efw->tx_stream.flags |= CIP_WRONG_DBS;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
44 44
45 offset = snd_hdac_chip_readl(bus, LLCH); 45 offset = snd_hdac_chip_readl(bus, LLCH);
46 46
47 if (offset < 0)
48 return -EIO;
49
50 /* Let's walk the linked capabilities list */ 47 /* Let's walk the linked capabilities list */
51 do { 48 do {
52 cur_cap = _snd_hdac_chip_read(l, bus, offset); 49 cur_cap = _snd_hdac_chip_read(l, bus, offset);
53 50
54 if (cur_cap < 0)
55 return -EIO;
56
57 dev_dbg(bus->dev, "Capability version: 0x%x\n", 51 dev_dbg(bus->dev, "Capability version: 0x%x\n",
58 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF)); 52 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
59 53
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
299 if (stream->direction != substream->stream) 299 if (stream->direction != substream->stream)
300 continue; 300 continue;
301 301
302 if (stream->opened) { 302 if (!stream->opened) {
303 if (!hstream->decoupled) 303 if (!hstream->decoupled)
304 snd_hdac_ext_stream_decouple(ebus, hstream, true); 304 snd_hdac_ext_stream_decouple(ebus, hstream, true);
305 res = hstream; 305 res = hstream;
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 442500e06b7c..5676b849379d 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
56 enable ? "enable" : "disable"); 56 enable ? "enable" : "disable");
57 57
58 if (enable) { 58 if (enable) {
59 if (!bus->i915_power_refcount++) 59 if (!bus->i915_power_refcount++) {
60 acomp->ops->get_power(acomp->dev); 60 acomp->ops->get_power(acomp->dev);
61 snd_hdac_set_codec_wakeup(bus, true);
62 snd_hdac_set_codec_wakeup(bus, false);
63 }
61 } else { 64 } else {
62 WARN_ON(!bus->i915_power_refcount); 65 WARN_ON(!bus->i915_power_refcount);
63 if (!--bus->i915_power_refcount) 66 if (!--bus->i915_power_refcount)
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ac0db1679f09..b077bb644434 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -5175,7 +5175,7 @@ static int alt_playback_pcm_open(struct hda_pcm_stream *hinfo,
5175 int err = 0; 5175 int err = 0;
5176 5176
5177 mutex_lock(&spec->pcm_mutex); 5177 mutex_lock(&spec->pcm_mutex);
5178 if (!spec->indep_hp_enabled) 5178 if (spec->indep_hp && !spec->indep_hp_enabled)
5179 err = -EBUSY; 5179 err = -EBUSY;
5180 else 5180 else
5181 spec->active_streams |= 1 << STREAM_INDEP_HP; 5181 spec->active_streams |= 1 << STREAM_INDEP_HP;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 745535d1840a..c38c68f57938 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
867 867
868 chip = card->private_data; 868 chip = card->private_data;
869 hda = container_of(chip, struct hda_intel, chip); 869 hda = container_of(chip, struct hda_intel, chip);
870 if (chip->disabled || hda->init_failed) 870 if (chip->disabled || hda->init_failed || !chip->running)
871 return 0; 871 return 0;
872 872
873 bus = azx_bus(chip); 873 bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
902 902
903 chip = card->private_data; 903 chip = card->private_data;
904 hda = container_of(chip, struct hda_intel, chip); 904 hda = container_of(chip, struct hda_intel, chip);
905 if (chip->disabled || hda->init_failed) 905 if (chip->disabled || hda->init_failed || !chip->running)
906 return 0; 906 return 0;
907 907
908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
979 if (!azx_has_pm_runtime(chip)) 979 if (!azx_has_pm_runtime(chip))
980 return 0; 980 return 0;
981 981
982 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 982 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
983 && hda->need_i915_power) { 983 bus = azx_bus(chip);
984 bus = azx_bus(chip); 984 if (hda->need_i915_power) {
985 snd_hdac_display_power(bus, true); 985 snd_hdac_display_power(bus, true);
986 haswell_set_bclk(hda); 986 haswell_set_bclk(hda);
987 /* toggle codec wakeup bit for STATESTS read */ 987 } else {
988 snd_hdac_set_codec_wakeup(bus, true); 988 /* toggle codec wakeup bit for STATESTS read */
989 snd_hdac_set_codec_wakeup(bus, false); 989 snd_hdac_set_codec_wakeup(bus, true);
990 snd_hdac_set_codec_wakeup(bus, false);
991 }
990 } 992 }
991 993
992 /* Read STATESTS before controller reset */ 994 /* Read STATESTS before controller reset */
@@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
1025 return 0; 1027 return 0;
1026 1028
1027 if (!power_save_controller || !azx_has_pm_runtime(chip) || 1029 if (!power_save_controller || !azx_has_pm_runtime(chip) ||
1028 azx_bus(chip)->codec_powered) 1030 azx_bus(chip)->codec_powered || !chip->running)
1029 return -EBUSY; 1031 return -EBUSY;
1030 1032
1031 return 0; 1033 return 0;
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
2182 /* ATI HDMI */ 2184 /* ATI HDMI */
2183 { PCI_DEVICE(0x1002, 0x1308), 2185 { PCI_DEVICE(0x1002, 0x1308),
2184 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2186 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2187 { PCI_DEVICE(0x1002, 0x157a),
2188 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2185 { PCI_DEVICE(0x1002, 0x793b), 2189 { PCI_DEVICE(0x1002, 0x793b),
2186 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, 2190 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2187 { PCI_DEVICE(0x1002, 0x7919), 2191 { PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
2236 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2240 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2237 { PCI_DEVICE(0x1002, 0xaab0), 2241 { PCI_DEVICE(0x1002, 0xaab0),
2238 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2242 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2243 { PCI_DEVICE(0x1002, 0xaac0),
2244 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2239 { PCI_DEVICE(0x1002, 0xaac8), 2245 { PCI_DEVICE(0x1002, 0xaac8),
2240 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2246 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2247 { PCI_DEVICE(0x1002, 0xaad8),
2248 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2249 { PCI_DEVICE(0x1002, 0xaae8),
2250 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2241 /* VIA VT8251/VT8237A */ 2251 /* VIA VT8251/VT8237A */
2242 { PCI_DEVICE(0x1106, 0x3288), 2252 { PCI_DEVICE(0x1106, 0x3288),
2243 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, 2253 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 25ccf781fbe7..584a0343ab0c 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
999 999
1000 spec->spdif_present = spdif_present; 1000 spec->spdif_present = spdif_present;
1001 /* SPDIF TX on/off */ 1001 /* SPDIF TX on/off */
1002 if (spdif_present) 1002 snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
1003 snd_hda_set_pin_ctl(codec, spdif_pin,
1004 spdif_present ? PIN_OUT : 0);
1005 1003
1006 cs_automute(codec); 1004 cs_automute(codec);
1007} 1005}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2f2433845d04..a97db5fc8a15 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3512{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3512{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3513{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3513{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3514{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, 3514{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
3515{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi },
3515{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3516{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3516{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3517{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
3517{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3518{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3527,6 +3528,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3527{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi }, 3528{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
3528{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi }, 3529{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
3529{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi }, 3530{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi },
3531{ .id = 0x8086280a, .name = "Broxton HDMI", .patch = patch_generic_hdmi },
3530{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi }, 3532{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
3531{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi }, 3533{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
3532{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi }, 3534{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
@@ -3575,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
3575MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3577MODULE_ALIAS("snd-hda-codec-id:10de0070");
3576MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3578MODULE_ALIAS("snd-hda-codec-id:10de0071");
3577MODULE_ALIAS("snd-hda-codec-id:10de0072"); 3579MODULE_ALIAS("snd-hda-codec-id:10de0072");
3580MODULE_ALIAS("snd-hda-codec-id:10de007d");
3578MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3581MODULE_ALIAS("snd-hda-codec-id:10de8001");
3579MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3582MODULE_ALIAS("snd-hda-codec-id:11069f80");
3580MODULE_ALIAS("snd-hda-codec-id:11069f81"); 3583MODULE_ALIAS("snd-hda-codec-id:11069f81");
@@ -3591,6 +3594,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862806");
3591MODULE_ALIAS("snd-hda-codec-id:80862807"); 3594MODULE_ALIAS("snd-hda-codec-id:80862807");
3592MODULE_ALIAS("snd-hda-codec-id:80862808"); 3595MODULE_ALIAS("snd-hda-codec-id:80862808");
3593MODULE_ALIAS("snd-hda-codec-id:80862809"); 3596MODULE_ALIAS("snd-hda-codec-id:80862809");
3597MODULE_ALIAS("snd-hda-codec-id:8086280a");
3594MODULE_ALIAS("snd-hda-codec-id:80862880"); 3598MODULE_ALIAS("snd-hda-codec-id:80862880");
3595MODULE_ALIAS("snd-hda-codec-id:80862882"); 3599MODULE_ALIAS("snd-hda-codec-id:80862882");
3596MODULE_ALIAS("snd-hda-codec-id:80862883"); 3600MODULE_ALIAS("snd-hda-codec-id:80862883");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b3b44681d3cf..374ea53288ca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), 2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), 2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), 2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
2226 2226
2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -4441,6 +4441,55 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
4441 } 4441 }
4442} 4442}
4443 4443
4444/* Hook to update amp GPIO4 for automute */
4445static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
4446 struct hda_jack_callback *jack)
4447{
4448 struct alc_spec *spec = codec->spec;
4449
4450 snd_hda_gen_hp_automute(codec, jack);
4451 /* mute_led_polarity is set to 0, so we pass inverted value here */
4452 alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
4453}
4454
4455/* Manage GPIOs for HP EliteBook Folio 9480m.
4456 *
4457 * GPIO4 is the headphone amplifier power control
4458 * GPIO3 is the audio output mute indicator LED
4459 */
4460
4461static void alc280_fixup_hp_9480m(struct hda_codec *codec,
4462 const struct hda_fixup *fix,
4463 int action)
4464{
4465 struct alc_spec *spec = codec->spec;
4466 static const struct hda_verb gpio_init[] = {
4467 { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
4468 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
4469 {}
4470 };
4471
4472 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4473 /* Set the hooks to turn the headphone amp on/off
4474 * as needed
4475 */
4476 spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
4477 spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook;
4478
4479 /* The GPIOs are currently off */
4480 spec->gpio_led = 0;
4481
4482 /* GPIO3 is connected to the output mute LED,
4483 * high is on, low is off
4484 */
4485 spec->mute_led_polarity = 0;
4486 spec->gpio_mute_led_mask = 0x08;
4487
4488 /* Initialize GPIO configuration */
4489 snd_hda_add_verbs(codec, gpio_init);
4490 }
4491}
4492
4444/* for hda_fixup_thinkpad_acpi() */ 4493/* for hda_fixup_thinkpad_acpi() */
4445#include "thinkpad_helper.c" 4494#include "thinkpad_helper.c"
4446 4495
@@ -4521,6 +4570,7 @@ enum {
4521 ALC286_FIXUP_HP_GPIO_LED, 4570 ALC286_FIXUP_HP_GPIO_LED,
4522 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, 4571 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
4523 ALC280_FIXUP_HP_DOCK_PINS, 4572 ALC280_FIXUP_HP_DOCK_PINS,
4573 ALC280_FIXUP_HP_9480M,
4524 ALC288_FIXUP_DELL_HEADSET_MODE, 4574 ALC288_FIXUP_DELL_HEADSET_MODE,
4525 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, 4575 ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
4526 ALC288_FIXUP_DELL_XPS_13_GPIO6, 4576 ALC288_FIXUP_DELL_XPS_13_GPIO6,
@@ -5011,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
5011 { 0x14, 0x90170110 }, 5061 { 0x14, 0x90170110 },
5012 { 0x17, 0x40000008 }, 5062 { 0x17, 0x40000008 },
5013 { 0x18, 0x411111f0 }, 5063 { 0x18, 0x411111f0 },
5014 { 0x19, 0x411111f0 }, 5064 { 0x19, 0x01a1913c },
5015 { 0x1a, 0x411111f0 }, 5065 { 0x1a, 0x411111f0 },
5016 { 0x1b, 0x411111f0 }, 5066 { 0x1b, 0x411111f0 },
5017 { 0x1d, 0x40f89b2d }, 5067 { 0x1d, 0x40f89b2d },
@@ -5043,6 +5093,10 @@ static const struct hda_fixup alc269_fixups[] = {
5043 .chained = true, 5093 .chained = true,
5044 .chain_id = ALC280_FIXUP_HP_GPIO4 5094 .chain_id = ALC280_FIXUP_HP_GPIO4
5045 }, 5095 },
5096 [ALC280_FIXUP_HP_9480M] = {
5097 .type = HDA_FIXUP_FUNC,
5098 .v.func = alc280_fixup_hp_9480m,
5099 },
5046 [ALC288_FIXUP_DELL_HEADSET_MODE] = { 5100 [ALC288_FIXUP_DELL_HEADSET_MODE] = {
5047 .type = HDA_FIXUP_FUNC, 5101 .type = HDA_FIXUP_FUNC,
5048 .v.func = alc_fixup_headset_mode_dell_alc288, 5102 .v.func = alc_fixup_headset_mode_dell_alc288,
@@ -5131,9 +5185,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5131 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5185 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5132 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5186 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5133 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5187 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5188 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5134 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5189 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5135 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5190 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5136 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5191 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5192 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5193 SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5137 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5194 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5138 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5195 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5139 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5196 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5161,6 +5218,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5161 SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5218 SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5162 SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5219 SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5163 SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 5220 SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5221 SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
5164 SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5222 SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5165 SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 5223 SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
5166 /* ALC290 */ 5224 /* ALC290 */
@@ -5234,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5234 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), 5292 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
5235 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), 5293 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
5236 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), 5294 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
5295 SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
5237 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5296 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
5238 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5297 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
5239 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5298 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5343,8 +5402,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5343 {0x19, 0x411111f0}, \ 5402 {0x19, 0x411111f0}, \
5344 {0x1a, 0x411111f0}, \ 5403 {0x1a, 0x411111f0}, \
5345 {0x1b, 0x411111f0}, \ 5404 {0x1b, 0x411111f0}, \
5346 {0x1d, 0x40700001}, \
5347 {0x1e, 0x411111f0}, \
5348 {0x21, 0x02211020} 5405 {0x21, 0x02211020}
5349 5406
5350#define ALC282_STANDARD_PINS \ 5407#define ALC282_STANDARD_PINS \
@@ -5375,8 +5432,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5375 {0x15, 0x0221401f}, \ 5432 {0x15, 0x0221401f}, \
5376 {0x1a, 0x411111f0}, \ 5433 {0x1a, 0x411111f0}, \
5377 {0x1b, 0x411111f0}, \ 5434 {0x1b, 0x411111f0}, \
5378 {0x1d, 0x40700001}, \ 5435 {0x1d, 0x40700001}
5379 {0x1e, 0x411111f0}
5380 5436
5381#define ALC298_STANDARD_PINS \ 5437#define ALC298_STANDARD_PINS \
5382 {0x18, 0x411111f0}, \ 5438 {0x18, 0x411111f0}, \
@@ -5408,6 +5464,39 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5408 {0x1d, 0x40700001}, 5464 {0x1d, 0x40700001},
5409 {0x21, 0x02211030}), 5465 {0x21, 0x02211030}),
5410 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5466 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5467 {0x12, 0x40000000},
5468 {0x14, 0x90170130},
5469 {0x17, 0x411111f0},
5470 {0x18, 0x411111f0},
5471 {0x19, 0x411111f0},
5472 {0x1a, 0x411111f0},
5473 {0x1b, 0x01014020},
5474 {0x1d, 0x4054c029},
5475 {0x1e, 0x411111f0},
5476 {0x21, 0x0221103f}),
5477 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5478 {0x12, 0x40000000},
5479 {0x14, 0x90170150},
5480 {0x17, 0x411111f0},
5481 {0x18, 0x411111f0},
5482 {0x19, 0x411111f0},
5483 {0x1a, 0x411111f0},
5484 {0x1b, 0x02011020},
5485 {0x1d, 0x4054c029},
5486 {0x1e, 0x411111f0},
5487 {0x21, 0x0221105f}),
5488 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5489 {0x12, 0x40000000},
5490 {0x14, 0x90170110},
5491 {0x17, 0x411111f0},
5492 {0x18, 0x411111f0},
5493 {0x19, 0x411111f0},
5494 {0x1a, 0x411111f0},
5495 {0x1b, 0x01014020},
5496 {0x1d, 0x4054c029},
5497 {0x1e, 0x411111f0},
5498 {0x21, 0x0221101f}),
5499 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5411 {0x12, 0x90a60160}, 5500 {0x12, 0x90a60160},
5412 {0x14, 0x90170120}, 5501 {0x14, 0x90170120},
5413 {0x17, 0x90170140}, 5502 {0x17, 0x90170140},
@@ -5469,10 +5558,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5469 {0x21, 0x02211030}), 5558 {0x21, 0x02211030}),
5470 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5559 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5471 ALC256_STANDARD_PINS, 5560 ALC256_STANDARD_PINS,
5472 {0x13, 0x40000000}), 5561 {0x13, 0x40000000},
5562 {0x1d, 0x40700001},
5563 {0x1e, 0x411111f0}),
5564 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5565 ALC256_STANDARD_PINS,
5566 {0x13, 0x411111f0},
5567 {0x1d, 0x40700001},
5568 {0x1e, 0x411111f0}),
5473 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5569 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5474 ALC256_STANDARD_PINS, 5570 ALC256_STANDARD_PINS,
5475 {0x13, 0x411111f0}), 5571 {0x13, 0x411111f0},
5572 {0x1d, 0x4077992d},
5573 {0x1e, 0x411111ff}),
5476 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5574 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5477 {0x12, 0x90a60130}, 5575 {0x12, 0x90a60130},
5478 {0x13, 0x40000000}, 5576 {0x13, 0x40000000},
@@ -5635,35 +5733,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5635 {0x13, 0x411111f0}, 5733 {0x13, 0x411111f0},
5636 {0x16, 0x01014020}, 5734 {0x16, 0x01014020},
5637 {0x18, 0x411111f0}, 5735 {0x18, 0x411111f0},
5638 {0x19, 0x01a19030}), 5736 {0x19, 0x01a19030},
5737 {0x1e, 0x411111f0}),
5639 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, 5738 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
5640 ALC292_STANDARD_PINS, 5739 ALC292_STANDARD_PINS,
5641 {0x12, 0x90a60140}, 5740 {0x12, 0x90a60140},
5642 {0x13, 0x411111f0}, 5741 {0x13, 0x411111f0},
5643 {0x16, 0x01014020}, 5742 {0x16, 0x01014020},
5644 {0x18, 0x02a19031}, 5743 {0x18, 0x02a19031},
5645 {0x19, 0x01a1903e}), 5744 {0x19, 0x01a1903e},
5745 {0x1e, 0x411111f0}),
5646 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, 5746 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5647 ALC292_STANDARD_PINS, 5747 ALC292_STANDARD_PINS,
5648 {0x12, 0x90a60140}, 5748 {0x12, 0x90a60140},
5649 {0x13, 0x411111f0}, 5749 {0x13, 0x411111f0},
5650 {0x16, 0x411111f0}, 5750 {0x16, 0x411111f0},
5651 {0x18, 0x411111f0}, 5751 {0x18, 0x411111f0},
5652 {0x19, 0x411111f0}), 5752 {0x19, 0x411111f0},
5753 {0x1e, 0x411111f0}),
5653 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5754 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5654 ALC292_STANDARD_PINS, 5755 ALC292_STANDARD_PINS,
5655 {0x12, 0x40000000}, 5756 {0x12, 0x40000000},
5656 {0x13, 0x90a60140}, 5757 {0x13, 0x90a60140},
5657 {0x16, 0x21014020}, 5758 {0x16, 0x21014020},
5658 {0x18, 0x411111f0}, 5759 {0x18, 0x411111f0},
5659 {0x19, 0x21a19030}), 5760 {0x19, 0x21a19030},
5761 {0x1e, 0x411111f0}),
5660 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5762 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5661 ALC292_STANDARD_PINS, 5763 ALC292_STANDARD_PINS,
5662 {0x12, 0x40000000}, 5764 {0x12, 0x40000000},
5663 {0x13, 0x90a60140}, 5765 {0x13, 0x90a60140},
5664 {0x16, 0x411111f0}, 5766 {0x16, 0x411111f0},
5665 {0x18, 0x411111f0}, 5767 {0x18, 0x411111f0},
5666 {0x19, 0x411111f0}), 5768 {0x19, 0x411111f0},
5769 {0x1e, 0x411111f0}),
5770 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5771 ALC292_STANDARD_PINS,
5772 {0x12, 0x40000000},
5773 {0x13, 0x90a60140},
5774 {0x16, 0x21014020},
5775 {0x18, 0x411111f0},
5776 {0x19, 0x21a19030},
5777 {0x1e, 0x411111ff}),
5667 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5778 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
5668 ALC298_STANDARD_PINS, 5779 ALC298_STANDARD_PINS,
5669 {0x12, 0x90a60130}, 5780 {0x12, 0x90a60130},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index dcc7fe91244c..9d947aef2c8b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a, 2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
2921 "HP Mini", STAC_92HD83XXX_HP_LED), 2921 "HP Mini", STAC_92HD83XXX_HP_LED),
2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP), 2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
2923 SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91, 2923 /* match both for 0xfa91 and 0xfa93 */
2924 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
2924 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD), 2925 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
2925 {} /* terminator */ 2926 {} /* terminator */
2926}; 2927};
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
88 int changed; 88 int changed;
89 89
90 mutex_lock(&chip->mutex); 90 mutex_lock(&chip->mutex);
91 changed = !value->value.integer.value[0] != chip->dac_mute; 91 changed = (!value->value.integer.value[0]) != chip->dac_mute;
92 if (changed) { 92 if (changed) {
93 chip->dac_mute = !value->value.integer.value[0]; 93 chip->dac_mute = !value->value.integer.value[0];
94 chip->model.update_dac_mute(chip); 94 chip->model.update_dac_mute(chip);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 2ae9619443d1..225bfda414e9 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
30 bool 30 bool
31 select SND_DMAENGINE_PCM 31 select SND_DMAENGINE_PCM
32 32
33config SND_SOC_TOPOLOGY
34 bool
35
33# All the supported SoCs 36# All the supported SoCs
34source "sound/soc/adi/Kconfig" 37source "sound/soc/adi/Kconfig"
35source "sound/soc/atmel/Kconfig" 38source "sound/soc/atmel/Kconfig"
@@ -54,6 +57,7 @@ source "sound/soc/samsung/Kconfig"
54source "sound/soc/sh/Kconfig" 57source "sound/soc/sh/Kconfig"
55source "sound/soc/sirf/Kconfig" 58source "sound/soc/sirf/Kconfig"
56source "sound/soc/spear/Kconfig" 59source "sound/soc/spear/Kconfig"
60source "sound/soc/sti/Kconfig"
57source "sound/soc/tegra/Kconfig" 61source "sound/soc/tegra/Kconfig"
58source "sound/soc/txx9/Kconfig" 62source "sound/soc/txx9/Kconfig"
59source "sound/soc/ux500/Kconfig" 63source "sound/soc/ux500/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index e189903fabf4..134aca150a50 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,9 @@
1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o 1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
2snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o 2snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
3
4ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
3snd-soc-core-objs += soc-topology.o 5snd-soc-core-objs += soc-topology.o
6endif
4 7
5ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),) 8ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
6snd-soc-core-objs += soc-generic-dmaengine-pcm.o 9snd-soc-core-objs += soc-generic-dmaengine-pcm.o
@@ -36,6 +39,7 @@ obj-$(CONFIG_SND_SOC) += samsung/
36obj-$(CONFIG_SND_SOC) += sh/ 39obj-$(CONFIG_SND_SOC) += sh/
37obj-$(CONFIG_SND_SOC) += sirf/ 40obj-$(CONFIG_SND_SOC) += sirf/
38obj-$(CONFIG_SND_SOC) += spear/ 41obj-$(CONFIG_SND_SOC) += spear/
42obj-$(CONFIG_SND_SOC) += sti/
39obj-$(CONFIG_SND_SOC) += tegra/ 43obj-$(CONFIG_SND_SOC) += tegra/
40obj-$(CONFIG_SND_SOC) += txx9/ 44obj-$(CONFIG_SND_SOC) += txx9/
41obj-$(CONFIG_SND_SOC) += ux500/ 45obj-$(CONFIG_SND_SOC) += ux500/
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 841d05946b88..ba8def5665c4 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -290,7 +290,7 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
290 int dir, dir_mask; 290 int dir, dir_mask;
291 int ret; 291 int ret;
292 292
293 pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n", 293 pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
294 ssc_readl(ssc_p->ssc->regs, SR)); 294 ssc_readl(ssc_p->ssc->regs, SR));
295 295
296 /* Enable PMC peripheral clock for this SSC */ 296 /* Enable PMC peripheral clock for this SSC */
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index dd94fea72d5d..5741c0aa6c03 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -344,14 +344,8 @@ static int au1xpsc_pcm_drvprobe(struct platform_device *pdev)
344 344
345 platform_set_drvdata(pdev, dmadata); 345 platform_set_drvdata(pdev, dmadata);
346 346
347 return snd_soc_register_platform(&pdev->dev, &au1xpsc_soc_platform); 347 return devm_snd_soc_register_platform(&pdev->dev,
348} 348 &au1xpsc_soc_platform);
349
350static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
351{
352 snd_soc_unregister_platform(&pdev->dev);
353
354 return 0;
355} 349}
356 350
357static struct platform_driver au1xpsc_pcm_driver = { 351static struct platform_driver au1xpsc_pcm_driver = {
@@ -359,7 +353,6 @@ static struct platform_driver au1xpsc_pcm_driver = {
359 .name = "au1xpsc-pcm", 353 .name = "au1xpsc-pcm",
360 }, 354 },
361 .probe = au1xpsc_pcm_drvprobe, 355 .probe = au1xpsc_pcm_drvprobe,
362 .remove = au1xpsc_pcm_drvremove,
363}; 356};
364 357
365module_platform_driver(au1xpsc_pcm_driver); 358module_platform_driver(au1xpsc_pcm_driver);
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 24cc7f40d87a..fcf5a9adde81 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -312,14 +312,8 @@ static int alchemy_pcm_drvprobe(struct platform_device *pdev)
312 312
313 platform_set_drvdata(pdev, ctx); 313 platform_set_drvdata(pdev, ctx);
314 314
315 return snd_soc_register_platform(&pdev->dev, &alchemy_pcm_soc_platform); 315 return devm_snd_soc_register_platform(&pdev->dev,
316} 316 &alchemy_pcm_soc_platform);
317
318static int alchemy_pcm_drvremove(struct platform_device *pdev)
319{
320 snd_soc_unregister_platform(&pdev->dev);
321
322 return 0;
323} 317}
324 318
325static struct platform_driver alchemy_pcmdma_driver = { 319static struct platform_driver alchemy_pcmdma_driver = {
@@ -327,7 +321,6 @@ static struct platform_driver alchemy_pcmdma_driver = {
327 .name = "alchemy-pcm-dma", 321 .name = "alchemy-pcm-dma",
328 }, 322 },
329 .probe = alchemy_pcm_drvprobe, 323 .probe = alchemy_pcm_drvprobe,
330 .remove = alchemy_pcm_drvremove,
331}; 324};
332 325
333module_platform_driver(alchemy_pcmdma_driver); 326module_platform_driver(alchemy_pcmdma_driver);
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index e742ef668496..38e853add96e 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -305,19 +305,9 @@ static int au1xpsc_i2s_drvprobe(struct platform_device *pdev)
305 return -ENOMEM; 305 return -ENOMEM;
306 306
307 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 307 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
308 if (!iores) 308 wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
309 return -ENODEV; 309 if (IS_ERR(wd->mmio))
310 310 return PTR_ERR(wd->mmio);
311 ret = -EBUSY;
312 if (!devm_request_mem_region(&pdev->dev, iores->start,
313 resource_size(iores),
314 pdev->name))
315 return -EBUSY;
316
317 wd->mmio = devm_ioremap(&pdev->dev, iores->start,
318 resource_size(iores));
319 if (!wd->mmio)
320 return -EBUSY;
321 311
322 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); 312 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
323 if (!dmares) 313 if (!dmares)
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 03fa1cbf8ec1..8c435beb263d 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -862,6 +862,8 @@ static const struct of_device_id bcm2835_i2s_of_match[] = {
862 {}, 862 {},
863}; 863};
864 864
865MODULE_DEVICE_TABLE(of, bcm2835_i2s_of_match);
866
865static struct platform_driver bcm2835_i2s_driver = { 867static struct platform_driver bcm2835_i2s_driver = {
866 .probe = bcm2835_i2s_probe, 868 .probe = bcm2835_i2s_probe,
867 .driver = { 869 .driver = {
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 238913e030e0..02ad2606fa19 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -450,13 +450,8 @@ static struct snd_soc_platform_driver bf5xx_ac97_soc_platform = {
450 450
451static int bf5xx_soc_platform_probe(struct platform_device *pdev) 451static int bf5xx_soc_platform_probe(struct platform_device *pdev)
452{ 452{
453 return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform); 453 return devm_snd_soc_register_platform(&pdev->dev,
454} 454 &bf5xx_ac97_soc_platform);
455
456static int bf5xx_soc_platform_remove(struct platform_device *pdev)
457{
458 snd_soc_unregister_platform(&pdev->dev);
459 return 0;
460} 455}
461 456
462static struct platform_driver bf5xx_pcm_driver = { 457static struct platform_driver bf5xx_pcm_driver = {
@@ -465,7 +460,6 @@ static struct platform_driver bf5xx_pcm_driver = {
465 }, 460 },
466 461
467 .probe = bf5xx_soc_platform_probe, 462 .probe = bf5xx_soc_platform_probe,
468 .remove = bf5xx_soc_platform_remove,
469}; 463};
470 464
471module_platform_driver(bf5xx_pcm_driver); 465module_platform_driver(bf5xx_pcm_driver);
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index d95477afcc67..6cba211da32e 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -342,13 +342,8 @@ static struct snd_soc_platform_driver bf5xx_i2s_soc_platform = {
342 342
343static int bfin_i2s_soc_platform_probe(struct platform_device *pdev) 343static int bfin_i2s_soc_platform_probe(struct platform_device *pdev)
344{ 344{
345 return snd_soc_register_platform(&pdev->dev, &bf5xx_i2s_soc_platform); 345 return devm_snd_soc_register_platform(&pdev->dev,
346} 346 &bf5xx_i2s_soc_platform);
347
348static int bfin_i2s_soc_platform_remove(struct platform_device *pdev)
349{
350 snd_soc_unregister_platform(&pdev->dev);
351 return 0;
352} 347}
353 348
354static struct platform_driver bfin_i2s_pcm_driver = { 349static struct platform_driver bfin_i2s_pcm_driver = {
@@ -357,7 +352,6 @@ static struct platform_driver bfin_i2s_pcm_driver = {
357 }, 352 },
358 353
359 .probe = bfin_i2s_soc_platform_probe, 354 .probe = bfin_i2s_soc_platform_probe,
360 .remove = bfin_i2s_soc_platform_remove,
361}; 355};
362 356
363module_platform_driver(bfin_i2s_pcm_driver); 357module_platform_driver(bfin_i2s_pcm_driver);
diff --git a/sound/soc/blackfin/bfin-eval-adau1x61.c b/sound/soc/blackfin/bfin-eval-adau1x61.c
index 4229f76daec9..fddfe00c9d69 100644
--- a/sound/soc/blackfin/bfin-eval-adau1x61.c
+++ b/sound/soc/blackfin/bfin-eval-adau1x61.c
@@ -108,6 +108,7 @@ static struct snd_soc_dai_link bfin_eval_adau1x61_dai = {
108 108
109static struct snd_soc_card bfin_eval_adau1x61 = { 109static struct snd_soc_card bfin_eval_adau1x61 = {
110 .name = "bfin-eval-adau1x61", 110 .name = "bfin-eval-adau1x61",
111 .owner = THIS_MODULE,
111 .driver_name = "eval-adau1x61", 112 .driver_name = "eval-adau1x61",
112 .dai_link = &bfin_eval_adau1x61_dai, 113 .dai_link = &bfin_eval_adau1x61_dai,
113 .num_links = 1, 114 .num_links = 1,
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 38b3dad9d48a..4d91a6aa696b 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1028,10 +1028,8 @@ static int pm860x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
1028 1028
1029 if (dir == PM860X_CLK_DIR_OUT) 1029 if (dir == PM860X_CLK_DIR_OUT)
1030 pm860x->dir = PM860X_CLK_DIR_OUT; 1030 pm860x->dir = PM860X_CLK_DIR_OUT;
1031 else { 1031 else /* Slave mode is not supported */
1032 pm860x->dir = PM860X_CLK_DIR_IN;
1033 return -EINVAL; 1032 return -EINVAL;
1034 }
1035 1033
1036 return 0; 1034 return 0;
1037} 1035}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 76125a281557..0c9733ecd17f 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -53,6 +53,7 @@ config SND_SOC_ALL_CODECS
53 select SND_SOC_CS4271_I2C if I2C 53 select SND_SOC_CS4271_I2C if I2C
54 select SND_SOC_CS4271_SPI if SPI_MASTER 54 select SND_SOC_CS4271_SPI if SPI_MASTER
55 select SND_SOC_CS42XX8_I2C if I2C 55 select SND_SOC_CS42XX8_I2C if I2C
56 select SND_SOC_CS4349 if I2C
56 select SND_SOC_CX20442 if TTY 57 select SND_SOC_CX20442 if TTY
57 select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI 58 select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
58 select SND_SOC_DA7213 if I2C 59 select SND_SOC_DA7213 if I2C
@@ -62,6 +63,8 @@ config SND_SOC_ALL_CODECS
62 select SND_SOC_BT_SCO 63 select SND_SOC_BT_SCO
63 select SND_SOC_ES8328_SPI if SPI_MASTER 64 select SND_SOC_ES8328_SPI if SPI_MASTER
64 select SND_SOC_ES8328_I2C if I2C 65 select SND_SOC_ES8328_I2C if I2C
66 select SND_SOC_GTM601
67 select SND_SOC_ICS43432
65 select SND_SOC_ISABELLE if I2C 68 select SND_SOC_ISABELLE if I2C
66 select SND_SOC_JZ4740_CODEC 69 select SND_SOC_JZ4740_CODEC
67 select SND_SOC_LM4857 if I2C 70 select SND_SOC_LM4857 if I2C
@@ -103,6 +106,7 @@ config SND_SOC_ALL_CODECS
103 select SND_SOC_STA350 if I2C 106 select SND_SOC_STA350 if I2C
104 select SND_SOC_STA529 if I2C 107 select SND_SOC_STA529 if I2C
105 select SND_SOC_STAC9766 if SND_SOC_AC97_BUS 108 select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
109 select SND_SOC_STI_SAS
106 select SND_SOC_TAS2552 if I2C 110 select SND_SOC_TAS2552 if I2C
107 select SND_SOC_TAS5086 if I2C 111 select SND_SOC_TAS5086 if I2C
108 select SND_SOC_TAS571X if I2C 112 select SND_SOC_TAS571X if I2C
@@ -404,6 +408,11 @@ config SND_SOC_CS42XX8_I2C
404 select SND_SOC_CS42XX8 408 select SND_SOC_CS42XX8
405 select REGMAP_I2C 409 select REGMAP_I2C
406 410
411# Cirrus Logic CS4349 HiFi DAC
412config SND_SOC_CS4349
413 tristate "Cirrus Logic CS4349 CODEC"
414 depends on I2C
415
407config SND_SOC_CX20442 416config SND_SOC_CX20442
408 tristate 417 tristate
409 depends on TTY 418 depends on TTY
@@ -447,6 +456,12 @@ config SND_SOC_ES8328_SPI
447 tristate 456 tristate
448 select SND_SOC_ES8328 457 select SND_SOC_ES8328
449 458
459config SND_SOC_GTM601
460 tristate 'GTM601 UMTS modem audio codec'
461
462config SND_SOC_ICS43432
463 tristate
464
450config SND_SOC_ISABELLE 465config SND_SOC_ISABELLE
451 tristate 466 tristate
452 467
@@ -617,6 +632,9 @@ config SND_SOC_STA529
617config SND_SOC_STAC9766 632config SND_SOC_STAC9766
618 tristate 633 tristate
619 634
635config SND_SOC_STI_SAS
636 tristate "codec Audio support for STI SAS codec"
637
620config SND_SOC_TAS2552 638config SND_SOC_TAS2552
621 tristate "Texas Instruments TAS2552 Mono Audio amplifier" 639 tristate "Texas Instruments TAS2552 Mono Audio amplifier"
622 depends on I2C 640 depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 3b58c4571859..4a32077954ae 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -45,6 +45,7 @@ snd-soc-cs4271-i2c-objs := cs4271-i2c.o
45snd-soc-cs4271-spi-objs := cs4271-spi.o 45snd-soc-cs4271-spi-objs := cs4271-spi.o
46snd-soc-cs42xx8-objs := cs42xx8.o 46snd-soc-cs42xx8-objs := cs42xx8.o
47snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o 47snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
48snd-soc-cs4349-objs := cs4349.o
48snd-soc-cx20442-objs := cx20442.o 49snd-soc-cx20442-objs := cx20442.o
49snd-soc-da7210-objs := da7210.o 50snd-soc-da7210-objs := da7210.o
50snd-soc-da7213-objs := da7213.o 51snd-soc-da7213-objs := da7213.o
@@ -55,6 +56,8 @@ snd-soc-dmic-objs := dmic.o
55snd-soc-es8328-objs := es8328.o 56snd-soc-es8328-objs := es8328.o
56snd-soc-es8328-i2c-objs := es8328-i2c.o 57snd-soc-es8328-i2c-objs := es8328-i2c.o
57snd-soc-es8328-spi-objs := es8328-spi.o 58snd-soc-es8328-spi-objs := es8328-spi.o
59snd-soc-gtm601-objs := gtm601.o
60snd-soc-ics43432-objs := ics43432.o
58snd-soc-isabelle-objs := isabelle.o 61snd-soc-isabelle-objs := isabelle.o
59snd-soc-jz4740-codec-objs := jz4740.o 62snd-soc-jz4740-codec-objs := jz4740.o
60snd-soc-l3-objs := l3.o 63snd-soc-l3-objs := l3.o
@@ -107,6 +110,7 @@ snd-soc-sta32x-objs := sta32x.o
107snd-soc-sta350-objs := sta350.o 110snd-soc-sta350-objs := sta350.o
108snd-soc-sta529-objs := sta529.o 111snd-soc-sta529-objs := sta529.o
109snd-soc-stac9766-objs := stac9766.o 112snd-soc-stac9766-objs := stac9766.o
113snd-soc-sti-sas-objs := sti-sas.o
110snd-soc-tas5086-objs := tas5086.o 114snd-soc-tas5086-objs := tas5086.o
111snd-soc-tas571x-objs := tas571x.o 115snd-soc-tas571x-objs := tas571x.o
112snd-soc-tfa9879-objs := tfa9879.o 116snd-soc-tfa9879-objs := tfa9879.o
@@ -233,6 +237,7 @@ obj-$(CONFIG_SND_SOC_CS4271_I2C) += snd-soc-cs4271-i2c.o
233obj-$(CONFIG_SND_SOC_CS4271_SPI) += snd-soc-cs4271-spi.o 237obj-$(CONFIG_SND_SOC_CS4271_SPI) += snd-soc-cs4271-spi.o
234obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o 238obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o
235obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o 239obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
240obj-$(CONFIG_SND_SOC_CS4349) += snd-soc-cs4349.o
236obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o 241obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
237obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o 242obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
238obj-$(CONFIG_SND_SOC_DA7213) += snd-soc-da7213.o 243obj-$(CONFIG_SND_SOC_DA7213) += snd-soc-da7213.o
@@ -243,6 +248,8 @@ obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
243obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o 248obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
244obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o 249obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
245obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o 250obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
251obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o
252obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o
246obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o 253obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
247obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o 254obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o
248obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o 255obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
@@ -291,6 +298,7 @@ obj-$(CONFIG_SND_SOC_STA32X) += snd-soc-sta32x.o
291obj-$(CONFIG_SND_SOC_STA350) += snd-soc-sta350.o 298obj-$(CONFIG_SND_SOC_STA350) += snd-soc-sta350.o
292obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o 299obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o
293obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o 300obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
301obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o
294obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o 302obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
295obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o 303obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
296obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o 304obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 3cc69a626454..9ef20dbccbe3 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -202,19 +202,21 @@ static struct snd_soc_dai_driver ad1980_dai = {
202 .formats = SND_SOC_STD_AC97_FMTS, }, 202 .formats = SND_SOC_STD_AC97_FMTS, },
203}; 203};
204 204
205#define AD1980_VENDOR_ID 0x41445300
206#define AD1980_VENDOR_MASK 0xffffff00
207
205static int ad1980_reset(struct snd_soc_codec *codec, int try_warm) 208static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
206{ 209{
207 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec); 210 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
208 unsigned int retry_cnt = 0; 211 unsigned int retry_cnt = 0;
212 int ret;
209 213
210 do { 214 do {
211 if (try_warm && soc_ac97_ops->warm_reset) { 215 ret = snd_ac97_reset(ac97, true, AD1980_VENDOR_ID,
212 soc_ac97_ops->warm_reset(ac97); 216 AD1980_VENDOR_MASK);
213 if (snd_soc_read(codec, AC97_RESET) == 0x0090) 217 if (ret >= 0)
214 return 1; 218 return 0;
215 }
216 219
217 soc_ac97_ops->reset(ac97);
218 /* 220 /*
219 * Set bit 16slot in register 74h, then every slot will has only 221 * Set bit 16slot in register 74h, then every slot will has only
220 * 16 bits. This command is sent out in 20bit mode, in which 222 * 16 bits. This command is sent out in 20bit mode, in which
@@ -223,8 +225,6 @@ static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
223 */ 225 */
224 snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900); 226 snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
225 227
226 if (snd_soc_read(codec, AC97_RESET) == 0x0090)
227 return 0;
228 } while (retry_cnt++ < 10); 228 } while (retry_cnt++ < 10);
229 229
230 dev_err(codec->dev, "Failed to reset: AC97 link error\n"); 230 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
@@ -240,7 +240,7 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec)
240 u16 vendor_id2; 240 u16 vendor_id2;
241 u16 ext_status; 241 u16 ext_status;
242 242
243 ac97 = snd_soc_new_ac97_codec(codec); 243 ac97 = snd_soc_new_ac97_codec(codec, 0, 0);
244 if (IS_ERR(ac97)) { 244 if (IS_ERR(ac97)) {
245 ret = PTR_ERR(ac97); 245 ret = PTR_ERR(ac97);
246 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret); 246 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -260,22 +260,10 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec)
260 if (ret < 0) 260 if (ret < 0)
261 goto reset_err; 261 goto reset_err;
262 262
263 /* Read out vendor ID to make sure it is ad1980 */
264 if (snd_soc_read(codec, AC97_VENDOR_ID1) != 0x4144) {
265 ret = -ENODEV;
266 goto reset_err;
267 }
268
269 vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2); 263 vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2);
270 264 if (vendor_id2 == 0x5374) {
271 if (vendor_id2 != 0x5370) { 265 dev_warn(codec->dev,
272 if (vendor_id2 != 0x5374) { 266 "Found AD1981 - only 2/2 IN/OUT Channels supported\n");
273 ret = -ENODEV;
274 goto reset_err;
275 } else {
276 dev_warn(codec->dev,
277 "Found AD1981 - only 2/2 IN/OUT Channels supported\n");
278 }
279 } 267 }
280 268
281 /* unmute captures and playbacks volume */ 269 /* unmute captures and playbacks volume */
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 36d842570745..198c924551b7 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -113,7 +113,7 @@
113 113
114#define ADAV80X_PLL_OUTE_SYSCLKPD(x) BIT(2 - (x)) 114#define ADAV80X_PLL_OUTE_SYSCLKPD(x) BIT(2 - (x))
115 115
116static struct reg_default adav80x_reg_defaults[] = { 116static const struct reg_default adav80x_reg_defaults[] = {
117 { ADAV80X_PLAYBACK_CTRL, 0x01 }, 117 { ADAV80X_PLAYBACK_CTRL, 0x01 },
118 { ADAV80X_AUX_IN_CTRL, 0x01 }, 118 { ADAV80X_AUX_IN_CTRL, 0x01 },
119 { ADAV80X_REC_CTRL, 0x02 }, 119 { ADAV80X_REC_CTRL, 0x02 },
@@ -865,7 +865,6 @@ const struct regmap_config adav80x_regmap_config = {
865 .val_bits = 8, 865 .val_bits = 8,
866 .pad_bits = 1, 866 .pad_bits = 1,
867 .reg_bits = 7, 867 .reg_bits = 7,
868 .read_flag_mask = 0x01,
869 868
870 .max_register = ADAV80X_PLL_OUTE, 869 .max_register = ADAV80X_PLL_OUTE,
871 870
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 66352f70ac47..4a90143d0e90 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -64,12 +64,15 @@
64#define FIL1_0 0x1c 64#define FIL1_0 0x1c
65#define FIL1_1 0x1d 65#define FIL1_1 0x1d
66#define FIL1_2 0x1e 66#define FIL1_2 0x1e
67#define FIL1_3 0x1f 67#define FIL1_3 0x1f /* The maximum valid register for ak4642 */
68#define PW_MGMT4 0x20 68#define PW_MGMT4 0x20
69#define MD_CTL5 0x21 69#define MD_CTL5 0x21
70#define LO_MS 0x22 70#define LO_MS 0x22
71#define HP_MS 0x23 71#define HP_MS 0x23
72#define SPK_MS 0x24 72#define SPK_MS 0x24 /* The maximum valid register for ak4643 */
73#define EQ_FBEQAB 0x25
74#define EQ_FBEQCD 0x26
75#define EQ_FBEQE 0x27 /* The maximum valid register for ak4648 */
73 76
74/* PW_MGMT1*/ 77/* PW_MGMT1*/
75#define PMVCM (1 << 6) /* VCOM Power Management */ 78#define PMVCM (1 << 6) /* VCOM Power Management */
@@ -241,7 +244,7 @@ static const struct snd_soc_dapm_route ak4642_intercon[] = {
241/* 244/*
242 * ak4642 register cache 245 * ak4642 register cache
243 */ 246 */
244static const struct reg_default ak4642_reg[] = { 247static const struct reg_default ak4643_reg[] = {
245 { 0, 0x00 }, { 1, 0x00 }, { 2, 0x01 }, { 3, 0x00 }, 248 { 0, 0x00 }, { 1, 0x00 }, { 2, 0x01 }, { 3, 0x00 },
246 { 4, 0x02 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 }, 249 { 4, 0x02 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 },
247 { 8, 0xe1 }, { 9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 }, 250 { 8, 0xe1 }, { 9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 },
@@ -254,6 +257,14 @@ static const struct reg_default ak4642_reg[] = {
254 { 36, 0x00 }, 257 { 36, 0x00 },
255}; 258};
256 259
260/* The default settings for 0x0 ~ 0x1f registers are the same for ak4642
261 and ak4643. So we reuse the ak4643 reg_default for ak4642.
262 The valid registers for ak4642 are 0x0 ~ 0x1f which is a subset of ak4643,
263 so define NUM_AK4642_REG_DEFAULTS for ak4642.
264*/
265#define ak4642_reg ak4643_reg
266#define NUM_AK4642_REG_DEFAULTS (FIL1_3 + 1)
267
257static const struct reg_default ak4648_reg[] = { 268static const struct reg_default ak4648_reg[] = {
258 { 0, 0x00 }, { 1, 0x00 }, { 2, 0x01 }, { 3, 0x00 }, 269 { 0, 0x00 }, { 1, 0x00 }, { 2, 0x01 }, { 3, 0x00 },
259 { 4, 0x02 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 }, 270 { 4, 0x02 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 },
@@ -535,15 +546,23 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
535static const struct regmap_config ak4642_regmap = { 546static const struct regmap_config ak4642_regmap = {
536 .reg_bits = 8, 547 .reg_bits = 8,
537 .val_bits = 8, 548 .val_bits = 8,
538 .max_register = ARRAY_SIZE(ak4642_reg) + 1, 549 .max_register = FIL1_3,
539 .reg_defaults = ak4642_reg, 550 .reg_defaults = ak4642_reg,
540 .num_reg_defaults = ARRAY_SIZE(ak4642_reg), 551 .num_reg_defaults = NUM_AK4642_REG_DEFAULTS,
552};
553
554static const struct regmap_config ak4643_regmap = {
555 .reg_bits = 8,
556 .val_bits = 8,
557 .max_register = SPK_MS,
558 .reg_defaults = ak4643_reg,
559 .num_reg_defaults = ARRAY_SIZE(ak4643_reg),
541}; 560};
542 561
543static const struct regmap_config ak4648_regmap = { 562static const struct regmap_config ak4648_regmap = {
544 .reg_bits = 8, 563 .reg_bits = 8,
545 .val_bits = 8, 564 .val_bits = 8,
546 .max_register = ARRAY_SIZE(ak4648_reg) + 1, 565 .max_register = EQ_FBEQE,
547 .reg_defaults = ak4648_reg, 566 .reg_defaults = ak4648_reg,
548 .num_reg_defaults = ARRAY_SIZE(ak4648_reg), 567 .num_reg_defaults = ARRAY_SIZE(ak4648_reg),
549}; 568};
@@ -553,7 +572,7 @@ static const struct ak4642_drvdata ak4642_drvdata = {
553}; 572};
554 573
555static const struct ak4642_drvdata ak4643_drvdata = { 574static const struct ak4642_drvdata ak4643_drvdata = {
556 .regmap_config = &ak4642_regmap, 575 .regmap_config = &ak4643_regmap,
557}; 576};
558 577
559static const struct ak4642_drvdata ak4648_drvdata = { 578static const struct ak4642_drvdata ak4648_drvdata = {
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 9277ac68b696..ef6de511dc7e 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -35,7 +35,7 @@
35/* 35/*
36 * ALC5632 register cache 36 * ALC5632 register cache
37 */ 37 */
38static struct reg_default alc5632_reg_defaults[] = { 38static const struct reg_default alc5632_reg_defaults[] = {
39 { 2, 0x8080 }, /* R2 - Speaker Output Volume */ 39 { 2, 0x8080 }, /* R2 - Speaker Output Volume */
40 { 4, 0x8080 }, /* R4 - Headphone Output Volume */ 40 { 4, 0x8080 }, /* R4 - Headphone Output Volume */
41 { 6, 0x8080 }, /* R6 - AUXOUT Volume */ 41 { 6, 0x8080 }, /* R6 - AUXOUT Volume */
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 802e05eae3e9..8a2221ab3d10 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1366,7 +1366,7 @@ static void arizona_wm5102_set_dac_comp(struct snd_soc_codec *codec,
1366{ 1366{
1367 struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); 1367 struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
1368 struct arizona *arizona = priv->arizona; 1368 struct arizona *arizona = priv->arizona;
1369 struct reg_default dac_comp[] = { 1369 struct reg_sequence dac_comp[] = {
1370 { 0x80, 0x3 }, 1370 { 0x80, 0x3 },
1371 { ARIZONA_DAC_COMP_1, 0 }, 1371 { ARIZONA_DAC_COMP_1, 0 },
1372 { ARIZONA_DAC_COMP_2, 0 }, 1372 { ARIZONA_DAC_COMP_2, 0 },
@@ -1504,7 +1504,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
1504 else 1504 else
1505 rates = &arizona_48k_bclk_rates[0]; 1505 rates = &arizona_48k_bclk_rates[0];
1506 1506
1507 wl = snd_pcm_format_width(params_format(params)); 1507 wl = params_width(params);
1508 1508
1509 if (tdm_slots) { 1509 if (tdm_slots) {
1510 arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n", 1510 arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n",
@@ -1756,17 +1756,6 @@ int arizona_init_dai(struct arizona_priv *priv, int id)
1756} 1756}
1757EXPORT_SYMBOL_GPL(arizona_init_dai); 1757EXPORT_SYMBOL_GPL(arizona_init_dai);
1758 1758
1759static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
1760{
1761 struct arizona_fll *fll = data;
1762
1763 arizona_fll_dbg(fll, "clock OK\n");
1764
1765 complete(&fll->ok);
1766
1767 return IRQ_HANDLED;
1768}
1769
1770static struct { 1759static struct {
1771 unsigned int min; 1760 unsigned int min;
1772 unsigned int max; 1761 unsigned int max;
@@ -2048,17 +2037,18 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll)
2048static int arizona_enable_fll(struct arizona_fll *fll) 2037static int arizona_enable_fll(struct arizona_fll *fll)
2049{ 2038{
2050 struct arizona *arizona = fll->arizona; 2039 struct arizona *arizona = fll->arizona;
2051 unsigned long time_left;
2052 bool use_sync = false; 2040 bool use_sync = false;
2053 int already_enabled = arizona_is_enabled_fll(fll); 2041 int already_enabled = arizona_is_enabled_fll(fll);
2054 struct arizona_fll_cfg cfg; 2042 struct arizona_fll_cfg cfg;
2043 int i;
2044 unsigned int val;
2055 2045
2056 if (already_enabled < 0) 2046 if (already_enabled < 0)
2057 return already_enabled; 2047 return already_enabled;
2058 2048
2059 if (already_enabled) { 2049 if (already_enabled) {
2060 /* Facilitate smooth refclk across the transition */ 2050 /* Facilitate smooth refclk across the transition */
2061 regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7, 2051 regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
2062 ARIZONA_FLL1_GAIN_MASK, 0); 2052 ARIZONA_FLL1_GAIN_MASK, 0);
2063 regmap_update_bits_async(fll->arizona->regmap, fll->base + 1, 2053 regmap_update_bits_async(fll->arizona->regmap, fll->base + 1,
2064 ARIZONA_FLL1_FREERUN, 2054 ARIZONA_FLL1_FREERUN,
@@ -2110,9 +2100,6 @@ static int arizona_enable_fll(struct arizona_fll *fll)
2110 if (!already_enabled) 2100 if (!already_enabled)
2111 pm_runtime_get(arizona->dev); 2101 pm_runtime_get(arizona->dev);
2112 2102
2113 /* Clear any pending completions */
2114 try_wait_for_completion(&fll->ok);
2115
2116 regmap_update_bits_async(arizona->regmap, fll->base + 1, 2103 regmap_update_bits_async(arizona->regmap, fll->base + 1,
2117 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA); 2104 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
2118 if (use_sync) 2105 if (use_sync)
@@ -2124,10 +2111,24 @@ static int arizona_enable_fll(struct arizona_fll *fll)
2124 regmap_update_bits_async(arizona->regmap, fll->base + 1, 2111 regmap_update_bits_async(arizona->regmap, fll->base + 1,
2125 ARIZONA_FLL1_FREERUN, 0); 2112 ARIZONA_FLL1_FREERUN, 0);
2126 2113
2127 time_left = wait_for_completion_timeout(&fll->ok, 2114 arizona_fll_dbg(fll, "Waiting for FLL lock...\n");
2128 msecs_to_jiffies(250)); 2115 val = 0;
2129 if (time_left == 0) 2116 for (i = 0; i < 15; i++) {
2117 if (i < 5)
2118 usleep_range(200, 400);
2119 else
2120 msleep(20);
2121
2122 regmap_read(arizona->regmap,
2123 ARIZONA_INTERRUPT_RAW_STATUS_5,
2124 &val);
2125 if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1)))
2126 break;
2127 }
2128 if (i == 15)
2130 arizona_fll_warn(fll, "Timed out waiting for lock\n"); 2129 arizona_fll_warn(fll, "Timed out waiting for lock\n");
2130 else
2131 arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i);
2131 2132
2132 return 0; 2133 return 0;
2133} 2134}
@@ -2212,11 +2213,8 @@ EXPORT_SYMBOL_GPL(arizona_set_fll);
2212int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq, 2213int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
2213 int ok_irq, struct arizona_fll *fll) 2214 int ok_irq, struct arizona_fll *fll)
2214{ 2215{
2215 int ret;
2216 unsigned int val; 2216 unsigned int val;
2217 2217
2218 init_completion(&fll->ok);
2219
2220 fll->id = id; 2218 fll->id = id;
2221 fll->base = base; 2219 fll->base = base;
2222 fll->arizona = arizona; 2220 fll->arizona = arizona;
@@ -2238,13 +2236,6 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
2238 snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name), 2236 snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
2239 "FLL%d clock OK", id); 2237 "FLL%d clock OK", id);
2240 2238
2241 ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
2242 arizona_fll_clock_ok, fll);
2243 if (ret != 0) {
2244 dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
2245 id, ret);
2246 }
2247
2248 regmap_update_bits(arizona->regmap, fll->base + 1, 2239 regmap_update_bits(arizona->regmap, fll->base + 1,
2249 ARIZONA_FLL1_FREERUN, 0); 2240 ARIZONA_FLL1_FREERUN, 0);
2250 2241
@@ -2313,6 +2304,82 @@ const struct snd_kcontrol_new arizona_adsp2_rate_controls[] = {
2313}; 2304};
2314EXPORT_SYMBOL_GPL(arizona_adsp2_rate_controls); 2305EXPORT_SYMBOL_GPL(arizona_adsp2_rate_controls);
2315 2306
2307static bool arizona_eq_filter_unstable(bool mode, __be16 _a, __be16 _b)
2308{
2309 s16 a = be16_to_cpu(_a);
2310 s16 b = be16_to_cpu(_b);
2311
2312 if (!mode) {
2313 return abs(a) >= 4096;
2314 } else {
2315 if (abs(b) >= 4096)
2316 return true;
2317
2318 return (abs((a << 16) / (4096 - b)) >= 4096 << 4);
2319 }
2320}
2321
2322int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
2323 struct snd_ctl_elem_value *ucontrol)
2324{
2325 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
2326 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
2327 struct soc_bytes *params = (void *)kcontrol->private_value;
2328 unsigned int val;
2329 __be16 *data;
2330 int len;
2331 int ret;
2332
2333 len = params->num_regs * regmap_get_val_bytes(arizona->regmap);
2334
2335 data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
2336 if (!data)
2337 return -ENOMEM;
2338
2339 data[0] &= cpu_to_be16(ARIZONA_EQ1_B1_MODE);
2340
2341 if (arizona_eq_filter_unstable(!!data[0], data[1], data[2]) ||
2342 arizona_eq_filter_unstable(true, data[4], data[5]) ||
2343 arizona_eq_filter_unstable(true, data[8], data[9]) ||
2344 arizona_eq_filter_unstable(true, data[12], data[13]) ||
2345 arizona_eq_filter_unstable(false, data[16], data[17])) {
2346 dev_err(arizona->dev, "Rejecting unstable EQ coefficients\n");
2347 ret = -EINVAL;
2348 goto out;
2349 }
2350
2351 ret = regmap_read(arizona->regmap, params->base, &val);
2352 if (ret != 0)
2353 goto out;
2354
2355 val &= ~ARIZONA_EQ1_B1_MODE;
2356 data[0] |= cpu_to_be16(val);
2357
2358 ret = regmap_raw_write(arizona->regmap, params->base, data, len);
2359
2360out:
2361 kfree(data);
2362 return ret;
2363}
2364EXPORT_SYMBOL_GPL(arizona_eq_coeff_put);
2365
2366int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
2367 struct snd_ctl_elem_value *ucontrol)
2368{
2369 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
2370 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
2371 __be16 *data = (__be16 *)ucontrol->value.bytes.data;
2372 s16 val = be16_to_cpu(*data);
2373
2374 if (abs(val) >= 4096) {
2375 dev_err(arizona->dev, "Rejecting unstable LHPF coefficients\n");
2376 return -EINVAL;
2377 }
2378
2379 return snd_soc_bytes_put(kcontrol, ucontrol);
2380}
2381EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
2382
2316MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support"); 2383MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
2317MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); 2384MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
2318MODULE_LICENSE("GPL"); 2385MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 43deb0462309..ada0a418ff4b 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -194,6 +194,20 @@ extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
194 ARIZONA_MIXER_ROUTES(name " Preloader", name "L"), \ 194 ARIZONA_MIXER_ROUTES(name " Preloader", name "L"), \
195 ARIZONA_MIXER_ROUTES(name " Preloader", name "R") 195 ARIZONA_MIXER_ROUTES(name " Preloader", name "R")
196 196
197#define ARIZONA_EQ_CONTROL(xname, xbase) \
198{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
199 .info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
200 .put = arizona_eq_coeff_put, .private_value = \
201 ((unsigned long)&(struct soc_bytes) { .base = xbase, \
202 .num_regs = 20, .mask = ~ARIZONA_EQ1_B1_MODE }) }
203
204#define ARIZONA_LHPF_CONTROL(xname, xbase) \
205{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
206 .info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
207 .put = arizona_lhpf_coeff_put, .private_value = \
208 ((unsigned long)&(struct soc_bytes) { .base = xbase, \
209 .num_regs = 1 }) }
210
197#define ARIZONA_RATE_ENUM_SIZE 4 211#define ARIZONA_RATE_ENUM_SIZE 4
198extern const char *arizona_rate_text[ARIZONA_RATE_ENUM_SIZE]; 212extern const char *arizona_rate_text[ARIZONA_RATE_ENUM_SIZE];
199extern const int arizona_rate_val[ARIZONA_RATE_ENUM_SIZE]; 213extern const int arizona_rate_val[ARIZONA_RATE_ENUM_SIZE];
@@ -229,6 +243,11 @@ extern int arizona_hp_ev(struct snd_soc_dapm_widget *w,
229 struct snd_kcontrol *kcontrol, 243 struct snd_kcontrol *kcontrol,
230 int event); 244 int event);
231 245
246extern int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
247 struct snd_ctl_elem_value *ucontrol);
248extern int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
249 struct snd_ctl_elem_value *ucontrol);
250
232extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id, 251extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
233 int source, unsigned int freq, int dir); 252 int source, unsigned int freq, int dir);
234 253
@@ -242,7 +261,6 @@ struct arizona_fll {
242 int id; 261 int id;
243 unsigned int base; 262 unsigned int base;
244 unsigned int vco_mult; 263 unsigned int vco_mult;
245 struct completion ok;
246 264
247 unsigned int fout; 265 unsigned int fout;
248 int sync_src; 266 int sync_src;
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 76564dc752d3..44c30fe3e315 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -74,33 +74,8 @@ static const struct reg_default cs35l32_reg_defaults[] = {
74static bool cs35l32_readable_register(struct device *dev, unsigned int reg) 74static bool cs35l32_readable_register(struct device *dev, unsigned int reg)
75{ 75{
76 switch (reg) { 76 switch (reg) {
77 case CS35L32_DEVID_AB: 77 case CS35L32_DEVID_AB ... CS35L32_AUDIO_LED_MNGR:
78 case CS35L32_DEVID_CD: 78 case CS35L32_ADSP_CTL ... CS35L32_FLASH_INHIBIT:
79 case CS35L32_DEVID_E:
80 case CS35L32_FAB_ID:
81 case CS35L32_REV_ID:
82 case CS35L32_PWRCTL1:
83 case CS35L32_PWRCTL2:
84 case CS35L32_CLK_CTL:
85 case CS35L32_BATT_THRESHOLD:
86 case CS35L32_VMON:
87 case CS35L32_BST_CPCP_CTL:
88 case CS35L32_IMON_SCALING:
89 case CS35L32_AUDIO_LED_MNGR:
90 case CS35L32_ADSP_CTL:
91 case CS35L32_CLASSD_CTL:
92 case CS35L32_PROTECT_CTL:
93 case CS35L32_INT_MASK_1:
94 case CS35L32_INT_MASK_2:
95 case CS35L32_INT_MASK_3:
96 case CS35L32_INT_STATUS_1:
97 case CS35L32_INT_STATUS_2:
98 case CS35L32_INT_STATUS_3:
99 case CS35L32_LED_STATUS:
100 case CS35L32_FLASH_MODE:
101 case CS35L32_MOVIE_MODE:
102 case CS35L32_FLASH_TIMER:
103 case CS35L32_FLASH_INHIBIT:
104 return true; 79 return true;
105 default: 80 default:
106 return false; 81 return false;
@@ -110,15 +85,8 @@ static bool cs35l32_readable_register(struct device *dev, unsigned int reg)
110static bool cs35l32_volatile_register(struct device *dev, unsigned int reg) 85static bool cs35l32_volatile_register(struct device *dev, unsigned int reg)
111{ 86{
112 switch (reg) { 87 switch (reg) {
113 case CS35L32_DEVID_AB: 88 case CS35L32_DEVID_AB ... CS35L32_REV_ID:
114 case CS35L32_DEVID_CD: 89 case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
115 case CS35L32_DEVID_E:
116 case CS35L32_FAB_ID:
117 case CS35L32_REV_ID:
118 case CS35L32_INT_STATUS_1:
119 case CS35L32_INT_STATUS_2:
120 case CS35L32_INT_STATUS_3:
121 case CS35L32_LED_STATUS:
122 return true; 90 return true;
123 default: 91 default:
124 return false; 92 return false;
@@ -128,10 +96,7 @@ static bool cs35l32_volatile_register(struct device *dev, unsigned int reg)
128static bool cs35l32_precious_register(struct device *dev, unsigned int reg) 96static bool cs35l32_precious_register(struct device *dev, unsigned int reg)
129{ 97{
130 switch (reg) { 98 switch (reg) {
131 case CS35L32_INT_STATUS_1: 99 case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
132 case CS35L32_INT_STATUS_2:
133 case CS35L32_INT_STATUS_3:
134 case CS35L32_LED_STATUS:
135 return true; 100 return true;
136 default: 101 default:
137 return false; 102 return false;
@@ -276,7 +241,7 @@ static const struct snd_soc_codec_driver soc_codec_dev_cs35l32 = {
276}; 241};
277 242
278/* Current and threshold powerup sequence Pg37 in datasheet */ 243/* Current and threshold powerup sequence Pg37 in datasheet */
279static const struct reg_default cs35l32_monitor_patch[] = { 244static const struct reg_sequence cs35l32_monitor_patch[] = {
280 245
281 { 0x00, 0x99 }, 246 { 0x00, 0x99 },
282 { 0x48, 0x17 }, 247 { 0x48, 0x17 },
@@ -441,8 +406,7 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
441 if (IS_ERR(cs35l32->reset_gpio)) 406 if (IS_ERR(cs35l32->reset_gpio))
442 return PTR_ERR(cs35l32->reset_gpio); 407 return PTR_ERR(cs35l32->reset_gpio);
443 408
444 if (cs35l32->reset_gpio) 409 gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
445 gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
446 410
447 /* initialize codec */ 411 /* initialize codec */
448 ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg); 412 ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
@@ -536,8 +500,7 @@ static int cs35l32_i2c_remove(struct i2c_client *i2c_client)
536 snd_soc_unregister_codec(&i2c_client->dev); 500 snd_soc_unregister_codec(&i2c_client->dev);
537 501
538 /* Hold down reset */ 502 /* Hold down reset */
539 if (cs35l32->reset_gpio) 503 gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
540 gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
541 504
542 return 0; 505 return 0;
543} 506}
@@ -551,8 +514,7 @@ static int cs35l32_runtime_suspend(struct device *dev)
551 regcache_mark_dirty(cs35l32->regmap); 514 regcache_mark_dirty(cs35l32->regmap);
552 515
553 /* Hold down reset */ 516 /* Hold down reset */
554 if (cs35l32->reset_gpio) 517 gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
555 gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
556 518
557 /* remove power */ 519 /* remove power */
558 regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies), 520 regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
@@ -575,8 +537,7 @@ static int cs35l32_runtime_resume(struct device *dev)
575 return ret; 537 return ret;
576 } 538 }
577 539
578 if (cs35l32->reset_gpio) 540 gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
579 gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
580 541
581 regcache_cache_only(cs35l32->regmap, false); 542 regcache_cache_only(cs35l32->regmap, false);
582 regcache_sync(cs35l32->regmap); 543 regcache_sync(cs35l32->regmap);
diff --git a/sound/soc/codecs/cs35l32.h b/sound/soc/codecs/cs35l32.h
index 31ab804a22bc..1d6c2508cd41 100644
--- a/sound/soc/codecs/cs35l32.h
+++ b/sound/soc/codecs/cs35l32.h
@@ -80,7 +80,7 @@ struct cs35l32_platform_data {
80#define CS35L32_GAIN_MGR_MASK 0x08 80#define CS35L32_GAIN_MGR_MASK 0x08
81#define CS35L32_ADSP_SHARE_MASK 0x08 81#define CS35L32_ADSP_SHARE_MASK 0x08
82#define CS35L32_ADSP_DATACFG_MASK 0x30 82#define CS35L32_ADSP_DATACFG_MASK 0x30
83#define CS35L32_SDOUT_3ST 0x80 83#define CS35L32_SDOUT_3ST 0x08
84#define CS35L32_BATT_REC_MASK 0x0E 84#define CS35L32_BATT_REC_MASK 0x0E
85#define CS35L32_BATT_THRESH_MASK 0x30 85#define CS35L32_BATT_THRESH_MASK 0x30
86 86
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d1a77c7489d8..55db19ddc5ff 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,23 +60,7 @@ static const struct reg_default cs4265_reg_defaults[] = {
60static bool cs4265_readable_register(struct device *dev, unsigned int reg) 60static bool cs4265_readable_register(struct device *dev, unsigned int reg)
61{ 61{
62 switch (reg) { 62 switch (reg) {
63 case CS4265_PWRCTL: 63 case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
64 case CS4265_DAC_CTL:
65 case CS4265_ADC_CTL:
66 case CS4265_MCLK_FREQ:
67 case CS4265_SIG_SEL:
68 case CS4265_CHB_PGA_CTL:
69 case CS4265_CHA_PGA_CTL:
70 case CS4265_ADC_CTL2:
71 case CS4265_DAC_CHA_VOL:
72 case CS4265_DAC_CHB_VOL:
73 case CS4265_DAC_CTL2:
74 case CS4265_SPDIF_CTL1:
75 case CS4265_SPDIF_CTL2:
76 case CS4265_INT_MASK:
77 case CS4265_STATUS_MODE_MSB:
78 case CS4265_STATUS_MODE_LSB:
79 case CS4265_CHIP_ID:
80 return true; 64 return true;
81 default: 65 default:
82 return false; 66 return false;
@@ -457,14 +441,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
457 case SND_SOC_DAIFMT_RIGHT_J: 441 case SND_SOC_DAIFMT_RIGHT_J:
458 if (params_width(params) == 16) { 442 if (params_width(params) == 16) {
459 snd_soc_update_bits(codec, CS4265_DAC_CTL, 443 snd_soc_update_bits(codec, CS4265_DAC_CTL,
460 CS4265_DAC_CTL_DIF, (1 << 5)); 444 CS4265_DAC_CTL_DIF, (2 << 4));
461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 445 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
462 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 446 CS4265_SPDIF_CTL2_DIF, (2 << 6));
463 } else { 447 } else {
464 snd_soc_update_bits(codec, CS4265_DAC_CTL, 448 snd_soc_update_bits(codec, CS4265_DAC_CTL,
465 CS4265_DAC_CTL_DIF, (3 << 5)); 449 CS4265_DAC_CTL_DIF, (3 << 4));
466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 450 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
467 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 451 CS4265_SPDIF_CTL2_DIF, (3 << 6));
468 } 452 }
469 break; 453 break;
470 case SND_SOC_DAIFMT_LEFT_J: 454 case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +457,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
473 snd_soc_update_bits(codec, CS4265_ADC_CTL, 457 snd_soc_update_bits(codec, CS4265_ADC_CTL,
474 CS4265_ADC_DIF, 0); 458 CS4265_ADC_DIF, 0);
475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 459 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
476 CS4265_SPDIF_CTL2_DIF, (1 << 6)); 460 CS4265_SPDIF_CTL2_DIF, 0);
477 461
478 break; 462 break;
479 default: 463 default:
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index b82d8e5b76ed..b256424d3f9a 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -110,58 +110,7 @@ static const struct reg_default cs42l52_reg_defaults[] = {
110static bool cs42l52_readable_register(struct device *dev, unsigned int reg) 110static bool cs42l52_readable_register(struct device *dev, unsigned int reg)
111{ 111{
112 switch (reg) { 112 switch (reg) {
113 case CS42L52_CHIP: 113 case CS42L52_CHIP ... CS42L52_CHARGE_PUMP:
114 case CS42L52_PWRCTL1:
115 case CS42L52_PWRCTL2:
116 case CS42L52_PWRCTL3:
117 case CS42L52_CLK_CTL:
118 case CS42L52_IFACE_CTL1:
119 case CS42L52_IFACE_CTL2:
120 case CS42L52_ADC_PGA_A:
121 case CS42L52_ADC_PGA_B:
122 case CS42L52_ANALOG_HPF_CTL:
123 case CS42L52_ADC_HPF_FREQ:
124 case CS42L52_ADC_MISC_CTL:
125 case CS42L52_PB_CTL1:
126 case CS42L52_MISC_CTL:
127 case CS42L52_PB_CTL2:
128 case CS42L52_MICA_CTL:
129 case CS42L52_MICB_CTL:
130 case CS42L52_PGAA_CTL:
131 case CS42L52_PGAB_CTL:
132 case CS42L52_PASSTHRUA_VOL:
133 case CS42L52_PASSTHRUB_VOL:
134 case CS42L52_ADCA_VOL:
135 case CS42L52_ADCB_VOL:
136 case CS42L52_ADCA_MIXER_VOL:
137 case CS42L52_ADCB_MIXER_VOL:
138 case CS42L52_PCMA_MIXER_VOL:
139 case CS42L52_PCMB_MIXER_VOL:
140 case CS42L52_BEEP_FREQ:
141 case CS42L52_BEEP_VOL:
142 case CS42L52_BEEP_TONE_CTL:
143 case CS42L52_TONE_CTL:
144 case CS42L52_MASTERA_VOL:
145 case CS42L52_MASTERB_VOL:
146 case CS42L52_HPA_VOL:
147 case CS42L52_HPB_VOL:
148 case CS42L52_SPKA_VOL:
149 case CS42L52_SPKB_VOL:
150 case CS42L52_ADC_PCM_MIXER:
151 case CS42L52_LIMITER_CTL1:
152 case CS42L52_LIMITER_CTL2:
153 case CS42L52_LIMITER_AT_RATE:
154 case CS42L52_ALC_CTL:
155 case CS42L52_ALC_RATE:
156 case CS42L52_ALC_THRESHOLD:
157 case CS42L52_NOISE_GATE_CTL:
158 case CS42L52_CLK_STATUS:
159 case CS42L52_BATT_COMPEN:
160 case CS42L52_BATT_LEVEL:
161 case CS42L52_SPK_STATUS:
162 case CS42L52_TEM_CTL:
163 case CS42L52_THE_FOLDBACK:
164 case CS42L52_CHARGE_PUMP:
165 return true; 114 return true;
166 default: 115 default:
167 return false; 116 return false;
@@ -919,7 +868,7 @@ static int cs42l52_set_bias_level(struct snd_soc_codec *codec,
919 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_U20_3LE | \ 868 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_U20_3LE | \
920 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE) 869 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE)
921 870
922static struct snd_soc_dai_ops cs42l52_ops = { 871static const struct snd_soc_dai_ops cs42l52_ops = {
923 .hw_params = cs42l52_pcm_hw_params, 872 .hw_params = cs42l52_pcm_hw_params,
924 .digital_mute = cs42l52_digital_mute, 873 .digital_mute = cs42l52_digital_mute,
925 .set_fmt = cs42l52_set_fmt, 874 .set_fmt = cs42l52_set_fmt,
@@ -1118,7 +1067,7 @@ static const struct snd_soc_codec_driver soc_codec_dev_cs42l52 = {
1118}; 1067};
1119 1068
1120/* Current and threshold powerup sequence Pg37 */ 1069/* Current and threshold powerup sequence Pg37 */
1121static const struct reg_default cs42l52_threshold_patch[] = { 1070static const struct reg_sequence cs42l52_threshold_patch[] = {
1122 1071
1123 { 0x00, 0x99 }, 1072 { 0x00, 0x99 },
1124 { 0x3E, 0xBA }, 1073 { 0x3E, 0xBA },
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 4ae793365985..52fe7a5ac408 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -115,52 +115,7 @@ static const struct reg_default cs42l56_reg_defaults[] = {
115static bool cs42l56_readable_register(struct device *dev, unsigned int reg) 115static bool cs42l56_readable_register(struct device *dev, unsigned int reg)
116{ 116{
117 switch (reg) { 117 switch (reg) {
118 case CS42L56_CHIP_ID_1: 118 case CS42L56_CHIP_ID_1 ... CS42L56_LIM_ATTACK_RATE:
119 case CS42L56_CHIP_ID_2:
120 case CS42L56_PWRCTL_1:
121 case CS42L56_PWRCTL_2:
122 case CS42L56_CLKCTL_1:
123 case CS42L56_CLKCTL_2:
124 case CS42L56_SERIAL_FMT:
125 case CS42L56_CLASSH_CTL:
126 case CS42L56_MISC_CTL:
127 case CS42L56_INT_STATUS:
128 case CS42L56_PLAYBACK_CTL:
129 case CS42L56_DSP_MUTE_CTL:
130 case CS42L56_ADCA_MIX_VOLUME:
131 case CS42L56_ADCB_MIX_VOLUME:
132 case CS42L56_PCMA_MIX_VOLUME:
133 case CS42L56_PCMB_MIX_VOLUME:
134 case CS42L56_ANAINPUT_ADV_VOLUME:
135 case CS42L56_DIGINPUT_ADV_VOLUME:
136 case CS42L56_MASTER_A_VOLUME:
137 case CS42L56_MASTER_B_VOLUME:
138 case CS42L56_BEEP_FREQ_ONTIME:
139 case CS42L56_BEEP_FREQ_OFFTIME:
140 case CS42L56_BEEP_TONE_CFG:
141 case CS42L56_TONE_CTL:
142 case CS42L56_CHAN_MIX_SWAP:
143 case CS42L56_AIN_REFCFG_ADC_MUX:
144 case CS42L56_HPF_CTL:
145 case CS42L56_MISC_ADC_CTL:
146 case CS42L56_GAIN_BIAS_CTL:
147 case CS42L56_PGAA_MUX_VOLUME:
148 case CS42L56_PGAB_MUX_VOLUME:
149 case CS42L56_ADCA_ATTENUATOR:
150 case CS42L56_ADCB_ATTENUATOR:
151 case CS42L56_ALC_EN_ATTACK_RATE:
152 case CS42L56_ALC_RELEASE_RATE:
153 case CS42L56_ALC_THRESHOLD:
154 case CS42L56_NOISE_GATE_CTL:
155 case CS42L56_ALC_LIM_SFT_ZC:
156 case CS42L56_AMUTE_HPLO_MUX:
157 case CS42L56_HPA_VOLUME:
158 case CS42L56_HPB_VOLUME:
159 case CS42L56_LOA_VOLUME:
160 case CS42L56_LOB_VOLUME:
161 case CS42L56_LIM_THRESHOLD_CTL:
162 case CS42L56_LIM_CTL_RELEASE_RATE:
163 case CS42L56_LIM_ATTACK_RATE:
164 return true; 119 return true;
165 default: 120 default:
166 return false; 121 return false;
@@ -989,7 +944,7 @@ static int cs42l56_set_bias_level(struct snd_soc_codec *codec,
989 SNDRV_PCM_FMTBIT_S32_LE) 944 SNDRV_PCM_FMTBIT_S32_LE)
990 945
991 946
992static struct snd_soc_dai_ops cs42l56_ops = { 947static const struct snd_soc_dai_ops cs42l56_ops = {
993 .hw_params = cs42l56_pcm_hw_params, 948 .hw_params = cs42l56_pcm_hw_params,
994 .digital_mute = cs42l56_digital_mute, 949 .digital_mute = cs42l56_digital_mute,
995 .set_fmt = cs42l56_set_dai_fmt, 950 .set_fmt = cs42l56_set_dai_fmt,
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 7cb1d7091dae..a8f468689d26 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -153,100 +153,8 @@ static bool cs42l73_volatile_register(struct device *dev, unsigned int reg)
153static bool cs42l73_readable_register(struct device *dev, unsigned int reg) 153static bool cs42l73_readable_register(struct device *dev, unsigned int reg)
154{ 154{
155 switch (reg) { 155 switch (reg) {
156 case CS42L73_DEVID_AB: 156 case CS42L73_DEVID_AB ... CS42L73_DEVID_E:
157 case CS42L73_DEVID_CD: 157 case CS42L73_REVID ... CS42L73_IM2:
158 case CS42L73_DEVID_E:
159 case CS42L73_REVID:
160 case CS42L73_PWRCTL1:
161 case CS42L73_PWRCTL2:
162 case CS42L73_PWRCTL3:
163 case CS42L73_CPFCHC:
164 case CS42L73_OLMBMSDC:
165 case CS42L73_DMMCC:
166 case CS42L73_XSPC:
167 case CS42L73_XSPMMCC:
168 case CS42L73_ASPC:
169 case CS42L73_ASPMMCC:
170 case CS42L73_VSPC:
171 case CS42L73_VSPMMCC:
172 case CS42L73_VXSPFS:
173 case CS42L73_MIOPC:
174 case CS42L73_ADCIPC:
175 case CS42L73_MICAPREPGAAVOL:
176 case CS42L73_MICBPREPGABVOL:
177 case CS42L73_IPADVOL:
178 case CS42L73_IPBDVOL:
179 case CS42L73_PBDC:
180 case CS42L73_HLADVOL:
181 case CS42L73_HLBDVOL:
182 case CS42L73_SPKDVOL:
183 case CS42L73_ESLDVOL:
184 case CS42L73_HPAAVOL:
185 case CS42L73_HPBAVOL:
186 case CS42L73_LOAAVOL:
187 case CS42L73_LOBAVOL:
188 case CS42L73_STRINV:
189 case CS42L73_XSPINV:
190 case CS42L73_ASPINV:
191 case CS42L73_VSPINV:
192 case CS42L73_LIMARATEHL:
193 case CS42L73_LIMRRATEHL:
194 case CS42L73_LMAXHL:
195 case CS42L73_LIMARATESPK:
196 case CS42L73_LIMRRATESPK:
197 case CS42L73_LMAXSPK:
198 case CS42L73_LIMARATEESL:
199 case CS42L73_LIMRRATEESL:
200 case CS42L73_LMAXESL:
201 case CS42L73_ALCARATE:
202 case CS42L73_ALCRRATE:
203 case CS42L73_ALCMINMAX:
204 case CS42L73_NGCAB:
205 case CS42L73_ALCNGMC:
206 case CS42L73_MIXERCTL:
207 case CS42L73_HLAIPAA:
208 case CS42L73_HLBIPBA:
209 case CS42L73_HLAXSPAA:
210 case CS42L73_HLBXSPBA:
211 case CS42L73_HLAASPAA:
212 case CS42L73_HLBASPBA:
213 case CS42L73_HLAVSPMA:
214 case CS42L73_HLBVSPMA:
215 case CS42L73_XSPAIPAA:
216 case CS42L73_XSPBIPBA:
217 case CS42L73_XSPAXSPAA:
218 case CS42L73_XSPBXSPBA:
219 case CS42L73_XSPAASPAA:
220 case CS42L73_XSPAASPBA:
221 case CS42L73_XSPAVSPMA:
222 case CS42L73_XSPBVSPMA:
223 case CS42L73_ASPAIPAA:
224 case CS42L73_ASPBIPBA:
225 case CS42L73_ASPAXSPAA:
226 case CS42L73_ASPBXSPBA:
227 case CS42L73_ASPAASPAA:
228 case CS42L73_ASPBASPBA:
229 case CS42L73_ASPAVSPMA:
230 case CS42L73_ASPBVSPMA:
231 case CS42L73_VSPAIPAA:
232 case CS42L73_VSPBIPBA:
233 case CS42L73_VSPAXSPAA:
234 case CS42L73_VSPBXSPBA:
235 case CS42L73_VSPAASPAA:
236 case CS42L73_VSPBASPBA:
237 case CS42L73_VSPAVSPMA:
238 case CS42L73_VSPBVSPMA:
239 case CS42L73_MMIXCTL:
240 case CS42L73_SPKMIPMA:
241 case CS42L73_SPKMXSPA:
242 case CS42L73_SPKMASPA:
243 case CS42L73_SPKMVSPMA:
244 case CS42L73_ESLMIPMA:
245 case CS42L73_ESLMXSPA:
246 case CS42L73_ESLMASPA:
247 case CS42L73_ESLMVSPMA:
248 case CS42L73_IM1:
249 case CS42L73_IM2:
250 return true; 158 return true;
251 default: 159 default:
252 return false; 160 return false;
@@ -1236,8 +1144,8 @@ static int cs42l73_set_tristate(struct snd_soc_dai *dai, int tristate)
1236 struct snd_soc_codec *codec = dai->codec; 1144 struct snd_soc_codec *codec = dai->codec;
1237 int id = dai->id; 1145 int id = dai->id;
1238 1146
1239 return snd_soc_update_bits(codec, CS42L73_SPC(id), 1147 return snd_soc_update_bits(codec, CS42L73_SPC(id), CS42L73_SP_3ST,
1240 0x7F, tristate << 7); 1148 tristate << 7);
1241} 1149}
1242 1150
1243static const struct snd_pcm_hw_constraint_list constraints_12_24 = { 1151static const struct snd_pcm_hw_constraint_list constraints_12_24 = {
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
new file mode 100644
index 000000000000..0ac8fc5ed4ae
--- /dev/null
+++ b/sound/soc/codecs/cs4349.c
@@ -0,0 +1,392 @@
1/*
2 * cs4349.c -- CS4349 ALSA Soc Audio driver
3 *
4 * Copyright 2015 Cirrus Logic, Inc.
5 *
6 * Authors: Tim Howe <Tim.Howe@cirrus.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/gpio.h>
19#include <linux/gpio/consumer.h>
20#include <linux/platform_device.h>
21#include <linux/pm.h>
22#include <linux/i2c.h>
23#include <linux/of_device.h>
24#include <linux/regmap.h>
25#include <linux/slab.h>
26#include <sound/core.h>
27#include <sound/pcm.h>
28#include <sound/pcm_params.h>
29#include <sound/soc.h>
30#include <sound/soc-dapm.h>
31#include <sound/initval.h>
32#include <sound/tlv.h>
33#include "cs4349.h"
34
35
36static const struct reg_default cs4349_reg_defaults[] = {
37 { 2, 0x00 }, /* r02 - Mode Control */
38 { 3, 0x09 }, /* r03 - Volume, Mixing and Inversion Control */
39 { 4, 0x81 }, /* r04 - Mute Control */
40 { 5, 0x00 }, /* r05 - Channel A Volume Control */
41 { 6, 0x00 }, /* r06 - Channel B Volume Control */
42 { 7, 0xB1 }, /* r07 - Ramp and Filter Control */
43 { 8, 0x1C }, /* r08 - Misc. Control */
44};
45
46/* Private data for the CS4349 */
47struct cs4349_private {
48 struct regmap *regmap;
49 struct gpio_desc *reset_gpio;
50 unsigned int mode;
51 int rate;
52};
53
54static bool cs4349_readable_register(struct device *dev, unsigned int reg)
55{
56 switch (reg) {
57 case CS4349_CHIPID ... CS4349_MISC:
58 return true;
59 default:
60 return false;
61 }
62}
63
64static bool cs4349_writeable_register(struct device *dev, unsigned int reg)
65{
66 switch (reg) {
67 case CS4349_MODE ... CS4349_MISC:
68 return true;
69 default:
70 return false;
71 }
72}
73
74static int cs4349_set_dai_fmt(struct snd_soc_dai *codec_dai,
75 unsigned int format)
76{
77 struct snd_soc_codec *codec = codec_dai->codec;
78 struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
79 unsigned int fmt;
80
81 fmt = format & SND_SOC_DAIFMT_FORMAT_MASK;
82
83 switch (fmt) {
84 case SND_SOC_DAIFMT_I2S:
85 case SND_SOC_DAIFMT_LEFT_J:
86 case SND_SOC_DAIFMT_RIGHT_J:
87 cs4349->mode = format & SND_SOC_DAIFMT_FORMAT_MASK;
88 break;
89 default:
90 return -EINVAL;
91 }
92
93 return 0;
94}
95
96static int cs4349_pcm_hw_params(struct snd_pcm_substream *substream,
97 struct snd_pcm_hw_params *params,
98 struct snd_soc_dai *dai)
99{
100 struct snd_soc_codec *codec = dai->codec;
101 struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
102 int fmt, ret;
103
104 cs4349->rate = params_rate(params);
105
106 switch (cs4349->mode) {
107 case SND_SOC_DAIFMT_I2S:
108 fmt = DIF_I2S;
109 break;
110 case SND_SOC_DAIFMT_LEFT_J:
111 fmt = DIF_LEFT_JST;
112 break;
113 case SND_SOC_DAIFMT_RIGHT_J:
114 switch (params_width(params)) {
115 case 16:
116 fmt = DIF_RGHT_JST16;
117 break;
118 case 24:
119 fmt = DIF_RGHT_JST24;
120 break;
121 default:
122 return -EINVAL;
123 }
124 break;
125 default:
126 return -EINVAL;
127 }
128
129 ret = snd_soc_update_bits(codec, CS4349_MODE, DIF_MASK,
130 MODE_FORMAT(fmt));
131 if (ret < 0)
132 return ret;
133
134 return 0;
135}
136
137static int cs4349_digital_mute(struct snd_soc_dai *dai, int mute)
138{
139 struct snd_soc_codec *codec = dai->codec;
140 int reg;
141
142 reg = 0;
143 if (mute)
144 reg = MUTE_AB_MASK;
145
146 return snd_soc_update_bits(codec, CS4349_MUTE, MUTE_AB_MASK, reg);
147}
148
149static DECLARE_TLV_DB_SCALE(dig_tlv, -12750, 50, 0);
150
151static const char * const chan_mix_texts[] = {
152 "Mute", "MuteA", "MuteA SwapB", "MuteA MonoB", "SwapA MuteB",
153 "BothR", "Swap", "SwapA MonoB", "MuteB", "Normal", "BothL",
154 "MonoB", "MonoA MuteB", "MonoA", "MonoA SwapB", "Mono",
155	/* Normal == Channel A = Left, Channel B = Right */
156};
157
158static const char * const fm_texts[] = {
159 "Auto", "Single", "Double", "Quad",
160};
161
162static const char * const deemph_texts[] = {
163 "None", "44.1k", "48k", "32k",
164};
165
166static const char * const softr_zeroc_texts[] = {
167 "Immediate", "Zero Cross", "Soft Ramp", "SR on ZC",
168};
169
170static int deemph_values[] = {
171 0, 4, 8, 12,
172};
173
174static int softr_zeroc_values[] = {
175 0, 64, 128, 192,
176};
177
178static const struct soc_enum chan_mix_enum =
179 SOC_ENUM_SINGLE(CS4349_VMI, 0,
180 ARRAY_SIZE(chan_mix_texts),
181 chan_mix_texts);
182
183static const struct soc_enum fm_mode_enum =
184 SOC_ENUM_SINGLE(CS4349_MODE, 0,
185 ARRAY_SIZE(fm_texts),
186 fm_texts);
187
188static SOC_VALUE_ENUM_SINGLE_DECL(deemph_enum, CS4349_MODE, 0, DEM_MASK,
189 deemph_texts, deemph_values);
190
191static SOC_VALUE_ENUM_SINGLE_DECL(softr_zeroc_enum, CS4349_RMPFLT, 0,
192 SR_ZC_MASK, softr_zeroc_texts,
193 softr_zeroc_values);
194
195static const struct snd_kcontrol_new cs4349_snd_controls[] = {
196 SOC_DOUBLE_R_TLV("Master Playback Volume",
197 CS4349_VOLA, CS4349_VOLB, 0, 0xFF, 1, dig_tlv),
198 SOC_ENUM("Functional Mode", fm_mode_enum),
199 SOC_ENUM("De-Emphasis Control", deemph_enum),
200 SOC_ENUM("Soft Ramp Zero Cross Control", softr_zeroc_enum),
201 SOC_ENUM("Channel Mixer", chan_mix_enum),
202 SOC_SINGLE("VolA = VolB Switch", CS4349_VMI, 7, 1, 0),
203 SOC_SINGLE("InvertA Switch", CS4349_VMI, 6, 1, 0),
204 SOC_SINGLE("InvertB Switch", CS4349_VMI, 5, 1, 0),
205 SOC_SINGLE("Auto-Mute Switch", CS4349_MUTE, 7, 1, 0),
206 SOC_SINGLE("MUTEC A = B Switch", CS4349_MUTE, 5, 1, 0),
207 SOC_SINGLE("Soft Ramp Up Switch", CS4349_RMPFLT, 5, 1, 0),
208 SOC_SINGLE("Soft Ramp Down Switch", CS4349_RMPFLT, 4, 1, 0),
209 SOC_SINGLE("Slow Roll Off Filter Switch", CS4349_RMPFLT, 2, 1, 0),
210 SOC_SINGLE("Freeze Switch", CS4349_MISC, 5, 1, 0),
211 SOC_SINGLE("Popguard Switch", CS4349_MISC, 4, 1, 0),
212};
213
214static const struct snd_soc_dapm_widget cs4349_dapm_widgets[] = {
215 SND_SOC_DAPM_DAC("HiFi DAC", NULL, SND_SOC_NOPM, 0, 0),
216
217 SND_SOC_DAPM_OUTPUT("OutputA"),
218 SND_SOC_DAPM_OUTPUT("OutputB"),
219};
220
221static const struct snd_soc_dapm_route cs4349_routes[] = {
222 {"DAC Playback", NULL, "OutputA"},
223 {"DAC Playback", NULL, "OutputB"},
224
225 {"OutputA", NULL, "HiFi DAC"},
226 {"OutputB", NULL, "HiFi DAC"},
227};
228
229#define CS4349_PCM_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
230 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | \
231 SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE | \
232 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE | \
233 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE | \
234 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE | \
235 SNDRV_PCM_FMTBIT_S32_LE)
236
237#define CS4349_PCM_RATES SNDRV_PCM_RATE_8000_192000
238
239static const struct snd_soc_dai_ops cs4349_dai_ops = {
240 .hw_params = cs4349_pcm_hw_params,
241 .set_fmt = cs4349_set_dai_fmt,
242 .digital_mute = cs4349_digital_mute,
243};
244
245static struct snd_soc_dai_driver cs4349_dai = {
246 .name = "cs4349_hifi",
247 .playback = {
248 .stream_name = "DAC Playback",
249 .channels_min = 1,
250 .channels_max = 2,
251 .rates = CS4349_PCM_RATES,
252 .formats = CS4349_PCM_FORMATS,
253 },
254 .ops = &cs4349_dai_ops,
255 .symmetric_rates = 1,
256};
257
258static struct snd_soc_codec_driver soc_codec_dev_cs4349 = {
259 .controls = cs4349_snd_controls,
260 .num_controls = ARRAY_SIZE(cs4349_snd_controls),
261
262 .dapm_widgets = cs4349_dapm_widgets,
263 .num_dapm_widgets = ARRAY_SIZE(cs4349_dapm_widgets),
264 .dapm_routes = cs4349_routes,
265 .num_dapm_routes = ARRAY_SIZE(cs4349_routes),
266};
267
268static const struct regmap_config cs4349_regmap = {
269 .reg_bits = 8,
270 .val_bits = 8,
271
272 .max_register = CS4349_MISC,
273 .reg_defaults = cs4349_reg_defaults,
274 .num_reg_defaults = ARRAY_SIZE(cs4349_reg_defaults),
275 .readable_reg = cs4349_readable_register,
276 .writeable_reg = cs4349_writeable_register,
277 .cache_type = REGCACHE_RBTREE,
278};
279
280static int cs4349_i2c_probe(struct i2c_client *client,
281 const struct i2c_device_id *id)
282{
283 struct cs4349_private *cs4349;
284 int ret;
285
286 cs4349 = devm_kzalloc(&client->dev, sizeof(*cs4349), GFP_KERNEL);
287 if (!cs4349)
288 return -ENOMEM;
289
290 cs4349->regmap = devm_regmap_init_i2c(client, &cs4349_regmap);
291 if (IS_ERR(cs4349->regmap)) {
292 ret = PTR_ERR(cs4349->regmap);
293 dev_err(&client->dev, "regmap_init() failed: %d\n", ret);
294 return ret;
295 }
296
297 /* Reset the Device */
298 cs4349->reset_gpio = devm_gpiod_get_optional(&client->dev,
299 "reset", GPIOD_OUT_LOW);
300 if (IS_ERR(cs4349->reset_gpio))
301 return PTR_ERR(cs4349->reset_gpio);
302
303 gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
304
305 i2c_set_clientdata(client, cs4349);
306
307 return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4349,
308 &cs4349_dai, 1);
309}
310
311static int cs4349_i2c_remove(struct i2c_client *client)
312{
313 struct cs4349_private *cs4349 = i2c_get_clientdata(client);
314
315 snd_soc_unregister_codec(&client->dev);
316
317 /* Hold down reset */
318 gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
319
320 return 0;
321}
322
323#ifdef CONFIG_PM
324static int cs4349_runtime_suspend(struct device *dev)
325{
326 struct cs4349_private *cs4349 = dev_get_drvdata(dev);
327 int ret;
328
329 ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, PWR_DWN);
330 if (ret < 0)
331 return ret;
332
333 regcache_cache_only(cs4349->regmap, true);
334
335 /* Hold down reset */
336 gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
337
338 return 0;
339}
340
341static int cs4349_runtime_resume(struct device *dev)
342{
343 struct cs4349_private *cs4349 = dev_get_drvdata(dev);
344 int ret;
345
346 ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, 0);
347 if (ret < 0)
348 return ret;
349
350 gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
351
352 regcache_cache_only(cs4349->regmap, false);
353 regcache_sync(cs4349->regmap);
354
355 return 0;
356}
357#endif
358
359static const struct dev_pm_ops cs4349_runtime_pm = {
360 SET_RUNTIME_PM_OPS(cs4349_runtime_suspend, cs4349_runtime_resume,
361 NULL)
362};
363
364static const struct of_device_id cs4349_of_match[] = {
365 { .compatible = "cirrus,cs4349", },
366 {},
367};
368
369MODULE_DEVICE_TABLE(of, cs4349_of_match);
370
371static const struct i2c_device_id cs4349_i2c_id[] = {
372 {"cs4349", 0},
373 {}
374};
375
376MODULE_DEVICE_TABLE(i2c, cs4349_i2c_id);
377
378static struct i2c_driver cs4349_i2c_driver = {
379 .driver = {
380 .name = "cs4349",
381 .of_match_table = cs4349_of_match,
382 },
383 .id_table = cs4349_i2c_id,
384 .probe = cs4349_i2c_probe,
385 .remove = cs4349_i2c_remove,
386};
387
388module_i2c_driver(cs4349_i2c_driver);
389
390MODULE_AUTHOR("Tim Howe <tim.howe@cirrus.com>");
391MODULE_DESCRIPTION("Cirrus Logic CS4349 ALSA SoC Codec Driver");
392MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4349.h b/sound/soc/codecs/cs4349.h
new file mode 100644
index 000000000000..d58c06a25358
--- /dev/null
+++ b/sound/soc/codecs/cs4349.h
@@ -0,0 +1,136 @@
1/*
2 * ALSA SoC CS4349 codec driver
3 *
4 * Copyright 2015 Cirrus Logic, Inc.
5 *
6 * Author: Tim Howe <Tim.Howe@cirrus.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 */
18
19#ifndef __CS4349_H__
20#define __CS4349_H__
21
22/* CS4349 registers addresses */
23#define CS4349_CHIPID 0x01 /* Device and Rev ID, Read Only */
24#define CS4349_MODE 0x02 /* Mode Control */
25#define CS4349_VMI 0x03 /* Volume, Mixing, Inversion Control */
26#define CS4349_MUTE 0x04 /* Mute Control */
27#define CS4349_VOLA 0x05 /* DAC Channel A Volume Control */
28#define CS4349_VOLB 0x06 /* DAC Channel B Volume Control */
29#define CS4349_RMPFLT 0x07 /* Ramp and Filter Control */
30#define CS4349_MISC 0x08 /* Power Down, Freeze Control, Pop Stop */
31
32#define CS4349_I2C_INCR 0x80
33
34
35/* Device and Revision ID */
36#define CS4349_REVA 0xF0 /* Rev A */
37#define CS4349_REVB 0xF1 /* Rev B */
38#define CS4349_REVC2 0xFF /* Rev C2 */
39
40
41/* PDN_DONE Poll Maximum
42 * If soft ramp is set, it will take much longer to power down
43 * the system.
44 */
45#define PDN_POLL_MAX 900
46
47
48/* Bitfield Definitions */
49
50/* CS4349_MODE */
51/* (Digital Interface Format, De-Emphasis Control, Functional Mode) */
52#define DIF2 (1 << 6)
53#define DIF1 (1 << 5)
54#define DIF0 (1 << 4)
55#define DEM1 (1 << 3)
56#define DEM0 (1 << 2)
57#define FM1 (1 << 1)
58#define DIF_LEFT_JST 0x00
59#define DIF_I2S 0x01
60#define DIF_RGHT_JST16 0x02
61#define DIF_RGHT_JST24 0x03
62#define DIF_TDM0 0x04
63#define DIF_TDM1 0x05
64#define DIF_TDM2 0x06
65#define DIF_TDM3 0x07
66#define DIF_MASK 0x70
67#define MODE_FORMAT(x) (((x)&7)<<4)
68#define DEM_MASK 0x0C
69#define NO_DEM 0x00
70#define DEM_441 0x04
71#define DEM_48K 0x08
72#define DEM_32K 0x0C
73#define FM_AUTO 0x00
74#define FM_SNGL 0x01
75#define FM_DBL 0x02
76#define FM_QUAD 0x03
77#define FM_SNGL_MIN 30000
78#define FM_SNGL_MAX 54000
79#define FM_DBL_MAX 108000
80#define FM_QUAD_MAX 216000
81#define FM_MASK 0x03
82
83/* CS4349_VMI (VMI = Volume, Mixing and Inversion Controls) */
84#define VOLBISA (1 << 7)
85#define VOLAISB (1 << 7)
86/* INVERT_A only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
87#define INVERT_A (1 << 6)
88/* INVERT_B only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
89#define INVERT_B (1 << 5)
90#define ATAPI3 (1 << 3)
91#define ATAPI2 (1 << 2)
92#define ATAPI1 (1 << 1)
93#define ATAPI0 (1 << 0)
94#define MUTEAB 0x00
95#define MUTEA_RIGHTB 0x01
96#define MUTEA_LEFTB 0x02
97#define MUTEA_SUMLRDIV2B 0x03
98#define RIGHTA_MUTEB 0x04
99#define RIGHTA_RIGHTB 0x05
100#define RIGHTA_LEFTB 0x06
101#define RIGHTA_SUMLRDIV2B 0x07
102#define LEFTA_MUTEB 0x08
103#define LEFTA_RIGHTB 0x09 /* Default */
104#define LEFTA_LEFTB 0x0A
105#define LEFTA_SUMLRDIV2B 0x0B
106#define SUMLRDIV2A_MUTEB 0x0C
107#define SUMLRDIV2A_RIGHTB 0x0D
108#define SUMLRDIV2A_LEFTB 0x0E
109#define SUMLRDIV2_AB 0x0F
110#define CHMIX_MASK 0x0F
111
112/* CS4349_MUTE */
113#define AUTOMUTE (1 << 7)
114#define MUTEC_AB (1 << 5)
115#define MUTE_A (1 << 4)
116#define MUTE_B (1 << 3)
117#define MUTE_AB_MASK 0x18
118
119/* CS4349_RMPFLT (Ramp and Filter Control) */
120#define SCZ1 (1 << 7)
121#define SCZ0 (1 << 6)
122#define RMP_UP (1 << 5)
123#define RMP_DN (1 << 4)
124#define FILT_SEL (1 << 2)
125#define IMMDT_CHNG 0x31
126#define ZEROCRSS 0x71
127#define SOFT_RMP 0xB1
128#define SFTRMP_ZEROCRSS 0xF1
129#define SR_ZC_MASK 0xC0
130
131/* CS4349_MISC */
132#define PWR_DWN (1 << 7)
133#define FREEZE (1 << 5)
134#define POPG_EN (1 << 4)
135
136#endif /* __CS4349_H__ */
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 9c7b41a8642d..c7b3e927c606 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -680,7 +680,7 @@ struct da7210_priv {
680 int master; 680 int master;
681}; 681};
682 682
683static struct reg_default da7210_reg_defaults[] = { 683static const struct reg_default da7210_reg_defaults[] = {
684 { 0x00, 0x00 }, 684 { 0x00, 0x00 },
685 { 0x01, 0x11 }, 685 { 0x01, 0x11 },
686 { 0x03, 0x00 }, 686 { 0x03, 0x00 },
@@ -1182,7 +1182,7 @@ static struct snd_soc_codec_driver soc_codec_dev_da7210 = {
1182 1182
1183#if IS_ENABLED(CONFIG_I2C) 1183#if IS_ENABLED(CONFIG_I2C)
1184 1184
1185static struct reg_default da7210_regmap_i2c_patch[] = { 1185static const struct reg_sequence da7210_regmap_i2c_patch[] = {
1186 1186
1187 /* System controller master disable */ 1187 /* System controller master disable */
1188 { DA7210_STARTUP1, 0x00 }, 1188 { DA7210_STARTUP1, 0x00 },
@@ -1268,7 +1268,7 @@ static struct i2c_driver da7210_i2c_driver = {
1268 1268
1269#if defined(CONFIG_SPI_MASTER) 1269#if defined(CONFIG_SPI_MASTER)
1270 1270
1271static struct reg_default da7210_regmap_spi_patch[] = { 1271static const struct reg_sequence da7210_regmap_spi_patch[] = {
1272 /* Dummy read to give two pulses over nCS for SPI */ 1272 /* Dummy read to give two pulses over nCS for SPI */
1273 { DA7210_AUX2, 0x00 }, 1273 { DA7210_AUX2, 0x00 },
1274 { DA7210_AUX2, 0x00 }, 1274 { DA7210_AUX2, 0x00 },
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index f635401b7730..47fc3bec8a9c 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -954,7 +954,7 @@ static const struct snd_soc_dapm_route da7213_audio_map[] = {
954 {"LINE", NULL, "Lineout PGA"}, 954 {"LINE", NULL, "Lineout PGA"},
955}; 955};
956 956
957static struct reg_default da7213_reg_defaults[] = { 957static const struct reg_default da7213_reg_defaults[] = {
958 { DA7213_DIG_ROUTING_DAI, 0x10 }, 958 { DA7213_DIG_ROUTING_DAI, 0x10 },
959 { DA7213_SR, 0x0A }, 959 { DA7213_SR, 0x0A },
960 { DA7213_REFERENCES, 0x80 }, 960 { DA7213_REFERENCES, 0x80 },
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 5446d047d2de..1d5a89c5164b 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -43,7 +43,7 @@ struct da732x_priv {
43/* 43/*
44 * da732x register cache - default settings 44 * da732x register cache - default settings
45 */ 45 */
46static struct reg_default da732x_reg_cache[] = { 46static const struct reg_default da732x_reg_cache[] = {
47 { DA732X_REG_REF1 , 0x02 }, 47 { DA732X_REG_REF1 , 0x02 },
48 { DA732X_REG_BIAS_EN , 0x80 }, 48 { DA732X_REG_BIAS_EN , 0x80 },
49 { DA732X_REG_BIAS1 , 0x00 }, 49 { DA732X_REG_BIAS1 , 0x00 },
@@ -1196,13 +1196,7 @@ static int da732x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
1196#define DA732X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ 1196#define DA732X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
1197 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) 1197 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
1198 1198
1199static struct snd_soc_dai_ops da732x_dai1_ops = { 1199static const struct snd_soc_dai_ops da732x_dai_ops = {
1200 .hw_params = da732x_hw_params,
1201 .set_fmt = da732x_set_dai_fmt,
1202 .set_sysclk = da732x_set_dai_sysclk,
1203};
1204
1205static struct snd_soc_dai_ops da732x_dai2_ops = {
1206 .hw_params = da732x_hw_params, 1200 .hw_params = da732x_hw_params,
1207 .set_fmt = da732x_set_dai_fmt, 1201 .set_fmt = da732x_set_dai_fmt,
1208 .set_sysclk = da732x_set_dai_sysclk, 1202 .set_sysclk = da732x_set_dai_sysclk,
@@ -1227,7 +1221,7 @@ static struct snd_soc_dai_driver da732x_dai[] = {
1227 .rates = DA732X_RATES, 1221 .rates = DA732X_RATES,
1228 .formats = DA732X_FORMATS, 1222 .formats = DA732X_FORMATS,
1229 }, 1223 },
1230 .ops = &da732x_dai1_ops, 1224 .ops = &da732x_dai_ops,
1231 }, 1225 },
1232 { 1226 {
1233 .name = "DA732X_AIFB", 1227 .name = "DA732X_AIFB",
@@ -1247,7 +1241,7 @@ static struct snd_soc_dai_driver da732x_dai[] = {
1247 .rates = DA732X_RATES, 1241 .rates = DA732X_RATES,
1248 .formats = DA732X_FORMATS, 1242 .formats = DA732X_FORMATS,
1249 }, 1243 },
1250 .ops = &da732x_dai2_ops, 1244 .ops = &da732x_dai_ops,
1251 }, 1245 },
1252}; 1246};
1253 1247
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 7d5baa66b132..19635d830b47 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -948,7 +948,7 @@ struct da9055_priv {
948 struct da9055_platform_data *pdata; 948 struct da9055_platform_data *pdata;
949}; 949};
950 950
951static struct reg_default da9055_reg_defaults[] = { 951static const struct reg_default da9055_reg_defaults[] = {
952 { 0x21, 0x10 }, 952 { 0x21, 0x10 },
953 { 0x22, 0x0A }, 953 { 0x22, 0x0A },
954 { 0x23, 0x00 }, 954 { 0x23, 0x00 },
@@ -1533,6 +1533,7 @@ static const struct of_device_id da9055_of_match[] = {
1533 { .compatible = "dlg,da9055-codec", }, 1533 { .compatible = "dlg,da9055-codec", },
1534 { } 1534 { }
1535}; 1535};
1536MODULE_DEVICE_TABLE(of, da9055_of_match);
1536 1537
1537/* I2C codec control layer */ 1538/* I2C codec control layer */
1538static struct i2c_driver da9055_i2c_driver = { 1539static struct i2c_driver da9055_i2c_driver = {
diff --git a/sound/soc/codecs/gtm601.c b/sound/soc/codecs/gtm601.c
new file mode 100644
index 000000000000..0b80052996d3
--- /dev/null
+++ b/sound/soc/codecs/gtm601.c
@@ -0,0 +1,95 @@
1/*
2 * This is a simple driver for the GTM601 Voice PCM interface
3 *
4 * Copyright (C) 2015 Goldelico GmbH
5 *
6 * Author: Marek Belisko <marek@goldelico.com>
7 *
8 * Based on wm8727.c driver
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/device.h>
20#include <sound/core.h>
21#include <sound/pcm.h>
22#include <sound/ac97_codec.h>
23#include <sound/initval.h>
24#include <sound/soc.h>
25
26static const struct snd_soc_dapm_widget gtm601_dapm_widgets[] = {
27 SND_SOC_DAPM_OUTPUT("AOUT"),
28 SND_SOC_DAPM_INPUT("AIN"),
29};
30
31static const struct snd_soc_dapm_route gtm601_dapm_routes[] = {
32 { "AOUT", NULL, "Playback" },
33 { "Capture", NULL, "AIN" },
34};
35
36static struct snd_soc_dai_driver gtm601_dai = {
37 .name = "gtm601",
38 .playback = {
39 .stream_name = "Playback",
40 .channels_min = 1,
41 .channels_max = 1,
42 .rates = SNDRV_PCM_RATE_8000,
43 .formats = SNDRV_PCM_FMTBIT_S16_LE,
44 },
45 .capture = {
46 .stream_name = "Capture",
47 .channels_min = 1,
48 .channels_max = 1,
49 .rates = SNDRV_PCM_RATE_8000,
50 .formats = SNDRV_PCM_FMTBIT_S16_LE,
51 },
52};
53
54static const struct snd_soc_codec_driver soc_codec_dev_gtm601 = {
55 .dapm_widgets = gtm601_dapm_widgets,
56 .num_dapm_widgets = ARRAY_SIZE(gtm601_dapm_widgets),
57 .dapm_routes = gtm601_dapm_routes,
58 .num_dapm_routes = ARRAY_SIZE(gtm601_dapm_routes),
59};
60
61static int gtm601_platform_probe(struct platform_device *pdev)
62{
63 return snd_soc_register_codec(&pdev->dev,
64 &soc_codec_dev_gtm601, &gtm601_dai, 1);
65}
66
67static int gtm601_platform_remove(struct platform_device *pdev)
68{
69 snd_soc_unregister_codec(&pdev->dev);
70 return 0;
71}
72
73#if defined(CONFIG_OF)
74static const struct of_device_id gtm601_codec_of_match[] = {
75 { .compatible = "option,gtm601", },
76 {},
77};
78MODULE_DEVICE_TABLE(of, gtm601_codec_of_match);
79#endif
80
81static struct platform_driver gtm601_codec_driver = {
82 .driver = {
83 .name = "gtm601",
84 .of_match_table = of_match_ptr(gtm601_codec_of_match),
85 },
86 .probe = gtm601_platform_probe,
87 .remove = gtm601_platform_remove,
88};
89
90module_platform_driver(gtm601_codec_driver);
91
92MODULE_DESCRIPTION("ASoC gtm601 driver");
93MODULE_AUTHOR("Marek Belisko <marek@goldelico.com>");
94MODULE_LICENSE("GPL");
95MODULE_ALIAS("platform:gtm601");
diff --git a/sound/soc/codecs/ics43432.c b/sound/soc/codecs/ics43432.c
new file mode 100644
index 000000000000..dd850b93938d
--- /dev/null
+++ b/sound/soc/codecs/ics43432.c
@@ -0,0 +1,76 @@
1/*
2 * I2S MEMS microphone driver for InvenSense ICS-43432
3 *
4 * - Non-configurable.
5 * - I2S interface, 64 BCLKs per frame, 32 bits per channel, 24-bit data
6 *
7 * Copyright (c) 2015 Axis Communications AB
8 *
9 * Licensed under GPL v2.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/slab.h>
15#include <sound/core.h>
16#include <sound/pcm.h>
17#include <sound/pcm_params.h>
18#include <sound/soc.h>
19#include <sound/initval.h>
20#include <sound/tlv.h>
21
22#define ICS43432_RATE_MIN 7190 /* Hz, from data sheet */
23#define ICS43432_RATE_MAX 52800 /* Hz, from data sheet */
24
25#define ICS43432_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32)
26
27static struct snd_soc_dai_driver ics43432_dai = {
28 .name = "ics43432-hifi",
29 .capture = {
30 .stream_name = "Capture",
31 .channels_min = 1,
32 .channels_max = 2,
33 .rate_min = ICS43432_RATE_MIN,
34 .rate_max = ICS43432_RATE_MAX,
35 .rates = SNDRV_PCM_RATE_CONTINUOUS,
36 .formats = ICS43432_FORMATS,
37 },
38};
39
40static struct snd_soc_codec_driver ics43432_codec_driver = {
41};
42
43static int ics43432_probe(struct platform_device *pdev)
44{
45 return snd_soc_register_codec(&pdev->dev, &ics43432_codec_driver,
46 &ics43432_dai, 1);
47}
48
49static int ics43432_remove(struct platform_device *pdev)
50{
51 snd_soc_unregister_codec(&pdev->dev);
52 return 0;
53}
54
55#ifdef CONFIG_OF
56static const struct of_device_id ics43432_ids[] = {
57 { .compatible = "invensense,ics43432", },
58 { }
59};
60MODULE_DEVICE_TABLE(of, ics43432_ids);
61#endif
62
63static struct platform_driver ics43432_driver = {
64 .driver = {
65 .name = "ics43432",
66 .of_match_table = of_match_ptr(ics43432_ids),
67 },
68 .probe = ics43432_probe,
69 .remove = ics43432_remove,
70};
71
72module_platform_driver(ics43432_driver);
73
74MODULE_DESCRIPTION("ASoC ICS43432 driver");
75MODULE_AUTHOR("Ricard Wanderlof <ricardw@axis.com>");
76MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index 58a43b11eb7e..be448373d39a 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -33,7 +33,7 @@
33 33
34 34
35/* Register default values for ISABELLE driver. */ 35/* Register default values for ISABELLE driver. */
36static struct reg_default isabelle_reg_defs[] = { 36static const struct reg_default isabelle_reg_defs[] = {
37 { 0, 0x00 }, 37 { 0, 0x00 },
38 { 1, 0x00 }, 38 { 1, 0x00 },
39 { 2, 0x00 }, 39 { 2, 0x00 },
@@ -1016,25 +1016,25 @@ static int isabelle_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
1016#define ISABELLE_FORMATS (SNDRV_PCM_FMTBIT_S20_3LE |\ 1016#define ISABELLE_FORMATS (SNDRV_PCM_FMTBIT_S20_3LE |\
1017 SNDRV_PCM_FMTBIT_S32_LE) 1017 SNDRV_PCM_FMTBIT_S32_LE)
1018 1018
1019static struct snd_soc_dai_ops isabelle_hs_dai_ops = { 1019static const struct snd_soc_dai_ops isabelle_hs_dai_ops = {
1020 .hw_params = isabelle_hw_params, 1020 .hw_params = isabelle_hw_params,
1021 .set_fmt = isabelle_set_dai_fmt, 1021 .set_fmt = isabelle_set_dai_fmt,
1022 .digital_mute = isabelle_hs_mute, 1022 .digital_mute = isabelle_hs_mute,
1023}; 1023};
1024 1024
1025static struct snd_soc_dai_ops isabelle_hf_dai_ops = { 1025static const struct snd_soc_dai_ops isabelle_hf_dai_ops = {
1026 .hw_params = isabelle_hw_params, 1026 .hw_params = isabelle_hw_params,
1027 .set_fmt = isabelle_set_dai_fmt, 1027 .set_fmt = isabelle_set_dai_fmt,
1028 .digital_mute = isabelle_hf_mute, 1028 .digital_mute = isabelle_hf_mute,
1029}; 1029};
1030 1030
1031static struct snd_soc_dai_ops isabelle_line_dai_ops = { 1031static const struct snd_soc_dai_ops isabelle_line_dai_ops = {
1032 .hw_params = isabelle_hw_params, 1032 .hw_params = isabelle_hw_params,
1033 .set_fmt = isabelle_set_dai_fmt, 1033 .set_fmt = isabelle_set_dai_fmt,
1034 .digital_mute = isabelle_line_mute, 1034 .digital_mute = isabelle_line_mute,
1035}; 1035};
1036 1036
1037static struct snd_soc_dai_ops isabelle_ul_dai_ops = { 1037static const struct snd_soc_dai_ops isabelle_ul_dai_ops = {
1038 .hw_params = isabelle_hw_params, 1038 .hw_params = isabelle_hw_params,
1039 .set_fmt = isabelle_set_dai_fmt, 1039 .set_fmt = isabelle_set_dai_fmt,
1040}; 1040};
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 9b2e38395eb9..9af5640e3446 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -30,7 +30,7 @@
30#include <asm/div64.h> 30#include <asm/div64.h>
31#include "lm49453.h" 31#include "lm49453.h"
32 32
33static struct reg_default lm49453_reg_defs[] = { 33static const struct reg_default lm49453_reg_defs[] = {
34 { 0, 0x00 }, 34 { 0, 0x00 },
35 { 1, 0x00 }, 35 { 1, 0x00 },
36 { 2, 0x00 }, 36 { 2, 0x00 },
@@ -188,7 +188,6 @@ static struct reg_default lm49453_reg_defs[] = {
188/* codec private data */ 188/* codec private data */
189struct lm49453_priv { 189struct lm49453_priv {
190 struct regmap *regmap; 190 struct regmap *regmap;
191 int fs_rate;
192}; 191};
193 192
194/* capture path controls */ 193/* capture path controls */
@@ -1112,13 +1111,10 @@ static int lm49453_hw_params(struct snd_pcm_substream *substream,
1112 struct snd_soc_dai *dai) 1111 struct snd_soc_dai *dai)
1113{ 1112{
1114 struct snd_soc_codec *codec = dai->codec; 1113 struct snd_soc_codec *codec = dai->codec;
1115 struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);
1116 u16 clk_div = 0; 1114 u16 clk_div = 0;
1117 1115
1118 lm49453->fs_rate = params_rate(params);
1119
1120 /* Setting DAC clock dividers based on substream sample rate. */ 1116 /* Setting DAC clock dividers based on substream sample rate. */
1121 switch (lm49453->fs_rate) { 1117 switch (params_rate(params)) {
1122 case 8000: 1118 case 8000:
1123 case 16000: 1119 case 16000:
1124 case 32000: 1120 case 32000:
@@ -1291,35 +1287,35 @@ static int lm49453_set_bias_level(struct snd_soc_codec *codec,
1291#define LM49453_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ 1287#define LM49453_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
1292 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) 1288 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
1293 1289
1294static struct snd_soc_dai_ops lm49453_headset_dai_ops = { 1290static const struct snd_soc_dai_ops lm49453_headset_dai_ops = {
1295 .hw_params = lm49453_hw_params, 1291 .hw_params = lm49453_hw_params,
1296 .set_sysclk = lm49453_set_dai_sysclk, 1292 .set_sysclk = lm49453_set_dai_sysclk,
1297 .set_fmt = lm49453_set_dai_fmt, 1293 .set_fmt = lm49453_set_dai_fmt,
1298 .digital_mute = lm49453_hp_mute, 1294 .digital_mute = lm49453_hp_mute,
1299}; 1295};
1300 1296
1301static struct snd_soc_dai_ops lm49453_speaker_dai_ops = { 1297static const struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
1302 .hw_params = lm49453_hw_params, 1298 .hw_params = lm49453_hw_params,
1303 .set_sysclk = lm49453_set_dai_sysclk, 1299 .set_sysclk = lm49453_set_dai_sysclk,
1304 .set_fmt = lm49453_set_dai_fmt, 1300 .set_fmt = lm49453_set_dai_fmt,
1305 .digital_mute = lm49453_ls_mute, 1301 .digital_mute = lm49453_ls_mute,
1306}; 1302};
1307 1303
1308static struct snd_soc_dai_ops lm49453_haptic_dai_ops = { 1304static const struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
1309 .hw_params = lm49453_hw_params, 1305 .hw_params = lm49453_hw_params,
1310 .set_sysclk = lm49453_set_dai_sysclk, 1306 .set_sysclk = lm49453_set_dai_sysclk,
1311 .set_fmt = lm49453_set_dai_fmt, 1307 .set_fmt = lm49453_set_dai_fmt,
1312 .digital_mute = lm49453_ha_mute, 1308 .digital_mute = lm49453_ha_mute,
1313}; 1309};
1314 1310
1315static struct snd_soc_dai_ops lm49453_ep_dai_ops = { 1311static const struct snd_soc_dai_ops lm49453_ep_dai_ops = {
1316 .hw_params = lm49453_hw_params, 1312 .hw_params = lm49453_hw_params,
1317 .set_sysclk = lm49453_set_dai_sysclk, 1313 .set_sysclk = lm49453_set_dai_sysclk,
1318 .set_fmt = lm49453_set_dai_fmt, 1314 .set_fmt = lm49453_set_dai_fmt,
1319 .digital_mute = lm49453_ep_mute, 1315 .digital_mute = lm49453_ep_mute,
1320}; 1316};
1321 1317
1322static struct snd_soc_dai_ops lm49453_lineout_dai_ops = { 1318static const struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
1323 .hw_params = lm49453_hw_params, 1319 .hw_params = lm49453_hw_params,
1324 .set_sysclk = lm49453_set_dai_sysclk, 1320 .set_sysclk = lm49453_set_dai_sysclk,
1325 .set_fmt = lm49453_set_dai_fmt, 1321 .set_fmt = lm49453_set_dai_fmt,
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index 1526aef2f2a9..bd41128c6f0b 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -35,7 +35,7 @@ struct max9768 {
35 u32 flags; 35 u32 flags;
36}; 36};
37 37
38static struct reg_default max9768_default_regs[] = { 38static const struct reg_default max9768_default_regs[] = {
39 { 0, 0 }, 39 { 0, 0 },
40 { 3, MAX9768_CTRL_FILTERLESS}, 40 { 3, MAX9768_CTRL_FILTERLESS},
41}; 41};
@@ -43,8 +43,8 @@ static struct reg_default max9768_default_regs[] = {
43static int max9768_get_gpio(struct snd_kcontrol *kcontrol, 43static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
44 struct snd_ctl_elem_value *ucontrol) 44 struct snd_ctl_elem_value *ucontrol)
45{ 45{
46 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 46 struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
47 struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec); 47 struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
48 int val = gpio_get_value_cansleep(max9768->mute_gpio); 48 int val = gpio_get_value_cansleep(max9768->mute_gpio);
49 49
50 ucontrol->value.integer.value[0] = !val; 50 ucontrol->value.integer.value[0] = !val;
@@ -55,8 +55,8 @@ static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
55static int max9768_set_gpio(struct snd_kcontrol *kcontrol, 55static int max9768_set_gpio(struct snd_kcontrol *kcontrol,
56 struct snd_ctl_elem_value *ucontrol) 56 struct snd_ctl_elem_value *ucontrol)
57{ 57{
58 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 58 struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
59 struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec); 59 struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
60 60
61 gpio_set_value_cansleep(max9768->mute_gpio, !ucontrol->value.integer.value[0]); 61 gpio_set_value_cansleep(max9768->mute_gpio, !ucontrol->value.integer.value[0]);
62 62
@@ -130,19 +130,20 @@ static const struct snd_soc_dapm_route max9768_dapm_routes[] = {
130 { "OUT-", NULL, "IN" }, 130 { "OUT-", NULL, "IN" },
131}; 131};
132 132
133static int max9768_probe(struct snd_soc_codec *codec) 133static int max9768_probe(struct snd_soc_component *component)
134{ 134{
135 struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec); 135 struct max9768 *max9768 = snd_soc_component_get_drvdata(component);
136 int ret; 136 int ret;
137 137
138 if (max9768->flags & MAX9768_FLAG_CLASSIC_PWM) { 138 if (max9768->flags & MAX9768_FLAG_CLASSIC_PWM) {
139 ret = snd_soc_write(codec, MAX9768_CTRL, MAX9768_CTRL_PWM); 139 ret = regmap_write(max9768->regmap, MAX9768_CTRL,
140 MAX9768_CTRL_PWM);
140 if (ret) 141 if (ret)
141 return ret; 142 return ret;
142 } 143 }
143 144
144 if (gpio_is_valid(max9768->mute_gpio)) { 145 if (gpio_is_valid(max9768->mute_gpio)) {
145 ret = snd_soc_add_codec_controls(codec, max9768_mute, 146 ret = snd_soc_add_component_controls(component, max9768_mute,
146 ARRAY_SIZE(max9768_mute)); 147 ARRAY_SIZE(max9768_mute));
147 if (ret) 148 if (ret)
148 return ret; 149 return ret;
@@ -151,7 +152,7 @@ static int max9768_probe(struct snd_soc_codec *codec)
151 return 0; 152 return 0;
152} 153}
153 154
154static struct snd_soc_codec_driver max9768_codec_driver = { 155static struct snd_soc_component_driver max9768_component_driver = {
155 .probe = max9768_probe, 156 .probe = max9768_probe,
156 .controls = max9768_volume, 157 .controls = max9768_volume,
157 .num_controls = ARRAY_SIZE(max9768_volume), 158 .num_controls = ARRAY_SIZE(max9768_volume),
@@ -183,11 +184,13 @@ static int max9768_i2c_probe(struct i2c_client *client,
183 184
184 if (pdata) { 185 if (pdata) {
185 /* Mute on powerup to avoid clicks */ 186 /* Mute on powerup to avoid clicks */
186 err = gpio_request_one(pdata->mute_gpio, GPIOF_INIT_HIGH, "MAX9768 Mute"); 187 err = devm_gpio_request_one(&client->dev, pdata->mute_gpio,
188 GPIOF_INIT_HIGH, "MAX9768 Mute");
187 max9768->mute_gpio = err ?: pdata->mute_gpio; 189 max9768->mute_gpio = err ?: pdata->mute_gpio;
188 190
189 /* Activate chip by releasing shutdown, enables I2C */ 191 /* Activate chip by releasing shutdown, enables I2C */
190 err = gpio_request_one(pdata->shdn_gpio, GPIOF_INIT_HIGH, "MAX9768 Shutdown"); 192 err = devm_gpio_request_one(&client->dev, pdata->shdn_gpio,
193 GPIOF_INIT_HIGH, "MAX9768 Shutdown");
191 max9768->shdn_gpio = err ?: pdata->shdn_gpio; 194 max9768->shdn_gpio = err ?: pdata->shdn_gpio;
192 195
193 max9768->flags = pdata->flags; 196 max9768->flags = pdata->flags;
@@ -199,38 +202,11 @@ static int max9768_i2c_probe(struct i2c_client *client,
199 i2c_set_clientdata(client, max9768); 202 i2c_set_clientdata(client, max9768);
200 203
201 max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config); 204 max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config);
202 if (IS_ERR(max9768->regmap)) { 205 if (IS_ERR(max9768->regmap))
203 err = PTR_ERR(max9768->regmap); 206 return PTR_ERR(max9768->regmap);
204 goto err_gpio_free;
205 }
206
207 err = snd_soc_register_codec(&client->dev, &max9768_codec_driver, NULL, 0);
208 if (err)
209 goto err_gpio_free;
210
211 return 0;
212
213 err_gpio_free:
214 if (gpio_is_valid(max9768->shdn_gpio))
215 gpio_free(max9768->shdn_gpio);
216 if (gpio_is_valid(max9768->mute_gpio))
217 gpio_free(max9768->mute_gpio);
218
219 return err;
220}
221
222static int max9768_i2c_remove(struct i2c_client *client)
223{
224 struct max9768 *max9768 = i2c_get_clientdata(client);
225 207
226 snd_soc_unregister_codec(&client->dev); 208 return devm_snd_soc_register_component(&client->dev,
227 209 &max9768_component_driver, NULL, 0);
228 if (gpio_is_valid(max9768->shdn_gpio))
229 gpio_free(max9768->shdn_gpio);
230 if (gpio_is_valid(max9768->mute_gpio))
231 gpio_free(max9768->mute_gpio);
232
233 return 0;
234} 210}
235 211
236static const struct i2c_device_id max9768_i2c_id[] = { 212static const struct i2c_device_id max9768_i2c_id[] = {
@@ -244,7 +220,6 @@ static struct i2c_driver max9768_i2c_driver = {
244 .name = "max9768", 220 .name = "max9768",
245 }, 221 },
246 .probe = max9768_i2c_probe, 222 .probe = max9768_i2c_probe,
247 .remove = max9768_i2c_remove,
248 .id_table = max9768_i2c_id, 223 .id_table = max9768_i2c_id,
249}; 224};
250module_i2c_driver(max9768_i2c_driver); 225module_i2c_driver(max9768_i2c_driver);
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 99c2daa0eebf..2c2df1790fd3 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -258,292 +258,36 @@ static const struct reg_default max98088_reg[] = {
258 { 0xc9, 0x00 }, /* C9 DAI2 biquad */ 258 { 0xc9, 0x00 }, /* C9 DAI2 biquad */
259}; 259};
260 260
261static struct {
262 int readable;
263 int writable;
264 int vol;
265} max98088_access[M98088_REG_CNT] = {
266 { 0xFF, 0xFF, 1 }, /* 00 IRQ status */
267 { 0xFF, 0x00, 1 }, /* 01 MIC status */
268 { 0xFF, 0x00, 1 }, /* 02 jack status */
269 { 0x1F, 0x1F, 1 }, /* 03 battery voltage */
270 { 0xFF, 0xFF, 0 }, /* 04 */
271 { 0xFF, 0xFF, 0 }, /* 05 */
272 { 0xFF, 0xFF, 0 }, /* 06 */
273 { 0xFF, 0xFF, 0 }, /* 07 */
274 { 0xFF, 0xFF, 0 }, /* 08 */
275 { 0xFF, 0xFF, 0 }, /* 09 */
276 { 0xFF, 0xFF, 0 }, /* 0A */
277 { 0xFF, 0xFF, 0 }, /* 0B */
278 { 0xFF, 0xFF, 0 }, /* 0C */
279 { 0xFF, 0xFF, 0 }, /* 0D */
280 { 0xFF, 0xFF, 0 }, /* 0E */
281 { 0xFF, 0xFF, 0 }, /* 0F interrupt enable */
282
283 { 0xFF, 0xFF, 0 }, /* 10 master clock */
284 { 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */
285 { 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */
286 { 0xFF, 0xFF, 0 }, /* 13 DAI1 clock control */
287 { 0xFF, 0xFF, 0 }, /* 14 DAI1 format */
288 { 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */
289 { 0xFF, 0xFF, 0 }, /* 16 DAI1 config */
290 { 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */
291 { 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */
292 { 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */
293 { 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */
294 { 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */
295 { 0xFF, 0xFF, 0 }, /* 1C DAI2 format */
296 { 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */
297 { 0xFF, 0xFF, 0 }, /* 1E DAI2 config */
298 { 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */
299
300 { 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */
301 { 0xFF, 0xFF, 0 }, /* 21 data config */
302 { 0xFF, 0xFF, 0 }, /* 22 DAC mixer */
303 { 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */
304 { 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */
305 { 0xFF, 0xFF, 0 }, /* 25 left HP mixer */
306 { 0xFF, 0xFF, 0 }, /* 26 right HP mixer */
307 { 0xFF, 0xFF, 0 }, /* 27 HP control */
308 { 0xFF, 0xFF, 0 }, /* 28 left REC mixer */
309 { 0xFF, 0xFF, 0 }, /* 29 right REC mixer */
310 { 0xFF, 0xFF, 0 }, /* 2A REC control */
311 { 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */
312 { 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */
313 { 0xFF, 0xFF, 0 }, /* 2D SPK control */
314 { 0xFF, 0xFF, 0 }, /* 2E sidetone */
315 { 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */
316
317 { 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */
318 { 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */
319 { 0xFF, 0xFF, 0 }, /* 32 DAI2 playbakc level */
320 { 0xFF, 0xFF, 0 }, /* 33 left ADC level */
321 { 0xFF, 0xFF, 0 }, /* 34 right ADC level */
322 { 0xFF, 0xFF, 0 }, /* 35 MIC1 level */
323 { 0xFF, 0xFF, 0 }, /* 36 MIC2 level */
324 { 0xFF, 0xFF, 0 }, /* 37 INA level */
325 { 0xFF, 0xFF, 0 }, /* 38 INB level */
326 { 0xFF, 0xFF, 0 }, /* 39 left HP volume */
327 { 0xFF, 0xFF, 0 }, /* 3A right HP volume */
328 { 0xFF, 0xFF, 0 }, /* 3B left REC volume */
329 { 0xFF, 0xFF, 0 }, /* 3C right REC volume */
330 { 0xFF, 0xFF, 0 }, /* 3D left SPK volume */
331 { 0xFF, 0xFF, 0 }, /* 3E right SPK volume */
332 { 0xFF, 0xFF, 0 }, /* 3F MIC config */
333
334 { 0xFF, 0xFF, 0 }, /* 40 MIC threshold */
335 { 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */
336 { 0xFF, 0xFF, 0 }, /* 42 excursion limiter threshold */
337 { 0xFF, 0xFF, 0 }, /* 43 ALC */
338 { 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */
339 { 0xFF, 0xFF, 0 }, /* 45 power limiter config */
340 { 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */
341 { 0xFF, 0xFF, 0 }, /* 47 audio input */
342 { 0xFF, 0xFF, 0 }, /* 48 microphone */
343 { 0xFF, 0xFF, 0 }, /* 49 level control */
344 { 0xFF, 0xFF, 0 }, /* 4A bypass switches */
345 { 0xFF, 0xFF, 0 }, /* 4B jack detect */
346 { 0xFF, 0xFF, 0 }, /* 4C input enable */
347 { 0xFF, 0xFF, 0 }, /* 4D output enable */
348 { 0xFF, 0xFF, 0 }, /* 4E bias control */
349 { 0xFF, 0xFF, 0 }, /* 4F DAC power */
350
351 { 0xFF, 0xFF, 0 }, /* 50 DAC power */
352 { 0xFF, 0xFF, 0 }, /* 51 system */
353 { 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */
354 { 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */
355 { 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */
356 { 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */
357 { 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */
358 { 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */
359 { 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */
360 { 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */
361 { 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */
362 { 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */
363 { 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */
364 { 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */
365 { 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */
366 { 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */
367
368 { 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */
369 { 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */
370 { 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */
371 { 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */
372 { 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */
373 { 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */
374 { 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */
375 { 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */
376 { 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */
377 { 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */
378 { 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */
379 { 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */
380 { 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */
381 { 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */
382 { 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */
383 { 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */
384
385 { 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */
386 { 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */
387 { 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */
388 { 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */
389 { 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */
390 { 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */
391 { 0xFF, 0xFF, 0 }, /* 76 DAI1 EQ4 */
392 { 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */
393 { 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */
394 { 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */
395 { 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */
396 { 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */
397 { 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */
398 { 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */
399 { 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */
400 { 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */
401
402 { 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */
403 { 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */
404 { 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */
405 { 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */
406 { 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */
407 { 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */
408 { 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */
409 { 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */
410 { 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */
411 { 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */
412 { 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */
413 { 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */
414 { 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */
415 { 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */
416 { 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */
417 { 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */
418
419 { 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */
420 { 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */
421 { 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */
422 { 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */
423 { 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */
424 { 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */
425 { 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */
426 { 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */
427 { 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */
428 { 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */
429 { 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */
430 { 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */
431 { 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */
432 { 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */
433 { 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */
434 { 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */
435
436 { 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */
437 { 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */
438 { 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */
439 { 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */
440 { 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */
441 { 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */
442 { 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */
443 { 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */
444 { 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */
445 { 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */
446 { 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */
447 { 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */
448 { 0xFF, 0xFF, 0 }, /* AC DAI2 EQ5 */
449 { 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */
450 { 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */
451 { 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */
452
453 { 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */
454 { 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */
455 { 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */
456 { 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */
457 { 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */
458 { 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */
459 { 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */
460 { 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */
461 { 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */
462 { 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */
463 { 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */
464 { 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */
465 { 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */
466 { 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */
467 { 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */
468 { 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */
469
470 { 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */
471 { 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */
472 { 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */
473 { 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */
474 { 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */
475 { 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */
476 { 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */
477 { 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */
478 { 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */
479 { 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */
480 { 0x00, 0x00, 0 }, /* CA */
481 { 0x00, 0x00, 0 }, /* CB */
482 { 0x00, 0x00, 0 }, /* CC */
483 { 0x00, 0x00, 0 }, /* CD */
484 { 0x00, 0x00, 0 }, /* CE */
485 { 0x00, 0x00, 0 }, /* CF */
486
487 { 0x00, 0x00, 0 }, /* D0 */
488 { 0x00, 0x00, 0 }, /* D1 */
489 { 0x00, 0x00, 0 }, /* D2 */
490 { 0x00, 0x00, 0 }, /* D3 */
491 { 0x00, 0x00, 0 }, /* D4 */
492 { 0x00, 0x00, 0 }, /* D5 */
493 { 0x00, 0x00, 0 }, /* D6 */
494 { 0x00, 0x00, 0 }, /* D7 */
495 { 0x00, 0x00, 0 }, /* D8 */
496 { 0x00, 0x00, 0 }, /* D9 */
497 { 0x00, 0x00, 0 }, /* DA */
498 { 0x00, 0x00, 0 }, /* DB */
499 { 0x00, 0x00, 0 }, /* DC */
500 { 0x00, 0x00, 0 }, /* DD */
501 { 0x00, 0x00, 0 }, /* DE */
502 { 0x00, 0x00, 0 }, /* DF */
503
504 { 0x00, 0x00, 0 }, /* E0 */
505 { 0x00, 0x00, 0 }, /* E1 */
506 { 0x00, 0x00, 0 }, /* E2 */
507 { 0x00, 0x00, 0 }, /* E3 */
508 { 0x00, 0x00, 0 }, /* E4 */
509 { 0x00, 0x00, 0 }, /* E5 */
510 { 0x00, 0x00, 0 }, /* E6 */
511 { 0x00, 0x00, 0 }, /* E7 */
512 { 0x00, 0x00, 0 }, /* E8 */
513 { 0x00, 0x00, 0 }, /* E9 */
514 { 0x00, 0x00, 0 }, /* EA */
515 { 0x00, 0x00, 0 }, /* EB */
516 { 0x00, 0x00, 0 }, /* EC */
517 { 0x00, 0x00, 0 }, /* ED */
518 { 0x00, 0x00, 0 }, /* EE */
519 { 0x00, 0x00, 0 }, /* EF */
520
521 { 0x00, 0x00, 0 }, /* F0 */
522 { 0x00, 0x00, 0 }, /* F1 */
523 { 0x00, 0x00, 0 }, /* F2 */
524 { 0x00, 0x00, 0 }, /* F3 */
525 { 0x00, 0x00, 0 }, /* F4 */
526 { 0x00, 0x00, 0 }, /* F5 */
527 { 0x00, 0x00, 0 }, /* F6 */
528 { 0x00, 0x00, 0 }, /* F7 */
529 { 0x00, 0x00, 0 }, /* F8 */
530 { 0x00, 0x00, 0 }, /* F9 */
531 { 0x00, 0x00, 0 }, /* FA */
532 { 0x00, 0x00, 0 }, /* FB */
533 { 0x00, 0x00, 0 }, /* FC */
534 { 0x00, 0x00, 0 }, /* FD */
535 { 0x00, 0x00, 0 }, /* FE */
536 { 0xFF, 0x00, 1 }, /* FF */
537};
538
539static bool max98088_readable_register(struct device *dev, unsigned int reg) 261static bool max98088_readable_register(struct device *dev, unsigned int reg)
540{ 262{
541 return max98088_access[reg].readable; 263 switch (reg) {
264 case M98088_REG_00_IRQ_STATUS ... 0xC9:
265 case M98088_REG_FF_REV_ID:
266 return true;
267 default:
268 return false;
269 }
270}
271
272static bool max98088_writeable_register(struct device *dev, unsigned int reg)
273{
274 switch (reg) {
275 case M98088_REG_03_BATTERY_VOLTAGE ... 0xC9:
276 return true;
277 default:
278 return false;
279 }
542} 280}
543 281
544static bool max98088_volatile_register(struct device *dev, unsigned int reg) 282static bool max98088_volatile_register(struct device *dev, unsigned int reg)
545{ 283{
546 return max98088_access[reg].vol; 284 switch (reg) {
285 case M98088_REG_00_IRQ_STATUS ... M98088_REG_03_BATTERY_VOLTAGE:
286 case M98088_REG_FF_REV_ID:
287 return true;
288 default:
289 return false;
290 }
547} 291}
548 292
549static const struct regmap_config max98088_regmap = { 293static const struct regmap_config max98088_regmap = {
@@ -551,6 +295,7 @@ static const struct regmap_config max98088_regmap = {
551 .val_bits = 8, 295 .val_bits = 8,
552 296
553 .readable_reg = max98088_readable_register, 297 .readable_reg = max98088_readable_register,
298 .writeable_reg = max98088_writeable_register,
554 .volatile_reg = max98088_volatile_register, 299 .volatile_reg = max98088_volatile_register,
555 .max_register = 0xff, 300 .max_register = 0xff,
556 301
diff --git a/sound/soc/codecs/max98088.h b/sound/soc/codecs/max98088.h
index be89a4f4aab8..efa39bf46742 100644
--- a/sound/soc/codecs/max98088.h
+++ b/sound/soc/codecs/max98088.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define M98088_REG_00_IRQ_STATUS 0x00 17#define M98088_REG_00_IRQ_STATUS 0x00
18#define M98088_REG_01_MIC_STATUS 0x01 18#define M98088_REG_01_MIC_STATUS 0x01
19#define M98088_REG_02_JACK_STAUS 0x02 19#define M98088_REG_02_JACK_STATUS 0x02
20#define M98088_REG_03_BATTERY_VOLTAGE 0x03 20#define M98088_REG_03_BATTERY_VOLTAGE 0x03
21#define M98088_REG_0F_IRQ_ENABLE 0x0F 21#define M98088_REG_0F_IRQ_ENABLE 0x0F
22#define M98088_REG_10_SYS_CLK 0x10 22#define M98088_REG_10_SYS_CLK 0x10
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index c9db085e6cf1..cdf534e7a285 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -267,75 +267,8 @@ static bool max98090_volatile_register(struct device *dev, unsigned int reg)
267static bool max98090_readable_register(struct device *dev, unsigned int reg) 267static bool max98090_readable_register(struct device *dev, unsigned int reg)
268{ 268{
269 switch (reg) { 269 switch (reg) {
270 case M98090_REG_DEVICE_STATUS: 270 case M98090_REG_DEVICE_STATUS ... M98090_REG_INTERRUPT_S:
271 case M98090_REG_JACK_STATUS: 271 case M98090_REG_LINE_INPUT_CONFIG ... 0xD1:
272 case M98090_REG_INTERRUPT_S:
273 case M98090_REG_RESERVED:
274 case M98090_REG_LINE_INPUT_CONFIG:
275 case M98090_REG_LINE_INPUT_LEVEL:
276 case M98090_REG_INPUT_MODE:
277 case M98090_REG_MIC1_INPUT_LEVEL:
278 case M98090_REG_MIC2_INPUT_LEVEL:
279 case M98090_REG_MIC_BIAS_VOLTAGE:
280 case M98090_REG_DIGITAL_MIC_ENABLE:
281 case M98090_REG_DIGITAL_MIC_CONFIG:
282 case M98090_REG_LEFT_ADC_MIXER:
283 case M98090_REG_RIGHT_ADC_MIXER:
284 case M98090_REG_LEFT_ADC_LEVEL:
285 case M98090_REG_RIGHT_ADC_LEVEL:
286 case M98090_REG_ADC_BIQUAD_LEVEL:
287 case M98090_REG_ADC_SIDETONE:
288 case M98090_REG_SYSTEM_CLOCK:
289 case M98090_REG_CLOCK_MODE:
290 case M98090_REG_CLOCK_RATIO_NI_MSB:
291 case M98090_REG_CLOCK_RATIO_NI_LSB:
292 case M98090_REG_CLOCK_RATIO_MI_MSB:
293 case M98090_REG_CLOCK_RATIO_MI_LSB:
294 case M98090_REG_MASTER_MODE:
295 case M98090_REG_INTERFACE_FORMAT:
296 case M98090_REG_TDM_CONTROL:
297 case M98090_REG_TDM_FORMAT:
298 case M98090_REG_IO_CONFIGURATION:
299 case M98090_REG_FILTER_CONFIG:
300 case M98090_REG_DAI_PLAYBACK_LEVEL:
301 case M98090_REG_DAI_PLAYBACK_LEVEL_EQ:
302 case M98090_REG_LEFT_HP_MIXER:
303 case M98090_REG_RIGHT_HP_MIXER:
304 case M98090_REG_HP_CONTROL:
305 case M98090_REG_LEFT_HP_VOLUME:
306 case M98090_REG_RIGHT_HP_VOLUME:
307 case M98090_REG_LEFT_SPK_MIXER:
308 case M98090_REG_RIGHT_SPK_MIXER:
309 case M98090_REG_SPK_CONTROL:
310 case M98090_REG_LEFT_SPK_VOLUME:
311 case M98090_REG_RIGHT_SPK_VOLUME:
312 case M98090_REG_DRC_TIMING:
313 case M98090_REG_DRC_COMPRESSOR:
314 case M98090_REG_DRC_EXPANDER:
315 case M98090_REG_DRC_GAIN:
316 case M98090_REG_RCV_LOUTL_MIXER:
317 case M98090_REG_RCV_LOUTL_CONTROL:
318 case M98090_REG_RCV_LOUTL_VOLUME:
319 case M98090_REG_LOUTR_MIXER:
320 case M98090_REG_LOUTR_CONTROL:
321 case M98090_REG_LOUTR_VOLUME:
322 case M98090_REG_JACK_DETECT:
323 case M98090_REG_INPUT_ENABLE:
324 case M98090_REG_OUTPUT_ENABLE:
325 case M98090_REG_LEVEL_CONTROL:
326 case M98090_REG_DSP_FILTER_ENABLE:
327 case M98090_REG_BIAS_CONTROL:
328 case M98090_REG_DAC_CONTROL:
329 case M98090_REG_ADC_CONTROL:
330 case M98090_REG_DEVICE_SHUTDOWN:
331 case M98090_REG_EQUALIZER_BASE ... M98090_REG_EQUALIZER_BASE + 0x68:
332 case M98090_REG_RECORD_BIQUAD_BASE ... M98090_REG_RECORD_BIQUAD_BASE + 0x0E:
333 case M98090_REG_DMIC3_VOLUME:
334 case M98090_REG_DMIC4_VOLUME:
335 case M98090_REG_DMIC34_BQ_PREATTEN:
336 case M98090_REG_RECORD_TDM_SLOT:
337 case M98090_REG_SAMPLE_RATE:
338 case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
339 case M98090_REG_REVISION_ID: 272 case M98090_REG_REVISION_ID:
340 return true; 273 return true;
341 default: 274 default:
@@ -850,6 +783,19 @@ static int max98090_micinput_event(struct snd_soc_dapm_widget *w,
850 return 0; 783 return 0;
851} 784}
852 785
786static int max98090_shdn_event(struct snd_soc_dapm_widget *w,
787 struct snd_kcontrol *kcontrol, int event)
788{
789 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
790 struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
791
792 if (event & SND_SOC_DAPM_POST_PMU)
793 max98090->shdn_pending = true;
794
795 return 0;
796
797}
798
853static const char *mic1_mux_text[] = { "IN12", "IN56" }; 799static const char *mic1_mux_text[] = { "IN12", "IN56" };
854 800
855static SOC_ENUM_SINGLE_DECL(mic1_mux_enum, 801static SOC_ENUM_SINGLE_DECL(mic1_mux_enum,
@@ -1158,9 +1104,11 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
1158 SND_SOC_DAPM_SUPPLY("SDOEN", M98090_REG_IO_CONFIGURATION, 1104 SND_SOC_DAPM_SUPPLY("SDOEN", M98090_REG_IO_CONFIGURATION,
1159 M98090_SDOEN_SHIFT, 0, NULL, 0), 1105 M98090_SDOEN_SHIFT, 0, NULL, 0),
1160 SND_SOC_DAPM_SUPPLY("DMICL_ENA", M98090_REG_DIGITAL_MIC_ENABLE, 1106 SND_SOC_DAPM_SUPPLY("DMICL_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
1161 M98090_DIGMICL_SHIFT, 0, NULL, 0), 1107 M98090_DIGMICL_SHIFT, 0, max98090_shdn_event,
1108 SND_SOC_DAPM_POST_PMU),
1162 SND_SOC_DAPM_SUPPLY("DMICR_ENA", M98090_REG_DIGITAL_MIC_ENABLE, 1109 SND_SOC_DAPM_SUPPLY("DMICR_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
1163 M98090_DIGMICR_SHIFT, 0, NULL, 0), 1110 M98090_DIGMICR_SHIFT, 0, max98090_shdn_event,
1111 SND_SOC_DAPM_POST_PMU),
1164 SND_SOC_DAPM_SUPPLY("AHPF", M98090_REG_FILTER_CONFIG, 1112 SND_SOC_DAPM_SUPPLY("AHPF", M98090_REG_FILTER_CONFIG,
1165 M98090_AHPF_SHIFT, 0, NULL, 0), 1113 M98090_AHPF_SHIFT, 0, NULL, 0),
1166 1114
@@ -1205,10 +1153,12 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
1205 &max98090_right_adc_mixer_controls[0], 1153 &max98090_right_adc_mixer_controls[0],
1206 ARRAY_SIZE(max98090_right_adc_mixer_controls)), 1154 ARRAY_SIZE(max98090_right_adc_mixer_controls)),
1207 1155
1208 SND_SOC_DAPM_ADC("ADCL", NULL, M98090_REG_INPUT_ENABLE, 1156 SND_SOC_DAPM_ADC_E("ADCL", NULL, M98090_REG_INPUT_ENABLE,
1209 M98090_ADLEN_SHIFT, 0), 1157 M98090_ADLEN_SHIFT, 0, max98090_shdn_event,
1210 SND_SOC_DAPM_ADC("ADCR", NULL, M98090_REG_INPUT_ENABLE, 1158 SND_SOC_DAPM_POST_PMU),
1211 M98090_ADREN_SHIFT, 0), 1159 SND_SOC_DAPM_ADC_E("ADCR", NULL, M98090_REG_INPUT_ENABLE,
1160 M98090_ADREN_SHIFT, 0, max98090_shdn_event,
1161 SND_SOC_DAPM_POST_PMU),
1212 1162
1213 SND_SOC_DAPM_AIF_OUT("AIFOUTL", "HiFi Capture", 0, 1163 SND_SOC_DAPM_AIF_OUT("AIFOUTL", "HiFi Capture", 0,
1214 SND_SOC_NOPM, 0, 0), 1164 SND_SOC_NOPM, 0, 0),
@@ -1801,10 +1751,13 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
1801 if (IS_ERR(max98090->mclk)) 1751 if (IS_ERR(max98090->mclk))
1802 break; 1752 break;
1803 1753
1804 if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) 1754 if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
1805 clk_disable_unprepare(max98090->mclk); 1755 clk_disable_unprepare(max98090->mclk);
1806 else 1756 } else {
1807 clk_prepare_enable(max98090->mclk); 1757 ret = clk_prepare_enable(max98090->mclk);
1758 if (ret)
1759 return ret;
1760 }
1808 break; 1761 break;
1809 1762
1810 case SND_SOC_BIAS_STANDBY: 1763 case SND_SOC_BIAS_STANDBY:
@@ -2383,7 +2336,7 @@ EXPORT_SYMBOL_GPL(max98090_mic_detect);
2383#define MAX98090_RATES SNDRV_PCM_RATE_8000_96000 2336#define MAX98090_RATES SNDRV_PCM_RATE_8000_96000
2384#define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) 2337#define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
2385 2338
2386static struct snd_soc_dai_ops max98090_dai_ops = { 2339static const struct snd_soc_dai_ops max98090_dai_ops = {
2387 .set_sysclk = max98090_dai_set_sysclk, 2340 .set_sysclk = max98090_dai_set_sysclk,
2388 .set_fmt = max98090_dai_set_fmt, 2341 .set_fmt = max98090_dai_set_fmt,
2389 .set_tdm_slot = max98090_set_tdm_slot, 2342 .set_tdm_slot = max98090_set_tdm_slot,
@@ -2536,9 +2489,26 @@ static int max98090_remove(struct snd_soc_codec *codec)
2536 return 0; 2489 return 0;
2537} 2490}
2538 2491
2492static void max98090_seq_notifier(struct snd_soc_dapm_context *dapm,
2493 enum snd_soc_dapm_type event, int subseq)
2494{
2495 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
2496 struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
2497
2498 if (max98090->shdn_pending) {
2499 snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
2500 M98090_SHDNN_MASK, 0);
2501 msleep(40);
2502 snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
2503 M98090_SHDNN_MASK, M98090_SHDNN_MASK);
2504 max98090->shdn_pending = false;
2505 }
2506}
2507
2539static struct snd_soc_codec_driver soc_codec_dev_max98090 = { 2508static struct snd_soc_codec_driver soc_codec_dev_max98090 = {
2540 .probe = max98090_probe, 2509 .probe = max98090_probe,
2541 .remove = max98090_remove, 2510 .remove = max98090_remove,
2511 .seq_notifier = max98090_seq_notifier,
2542 .set_bias_level = max98090_set_bias_level, 2512 .set_bias_level = max98090_set_bias_level,
2543}; 2513};
2544 2514
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index 21ff743f5af2..bc610d9a9ecb 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -1543,6 +1543,7 @@ struct max98090_priv {
1543 unsigned int pa2en; 1543 unsigned int pa2en;
1544 unsigned int sidetone; 1544 unsigned int sidetone;
1545 bool master; 1545 bool master;
1546 bool shdn_pending;
1546}; 1547};
1547 1548
1548int max98090_mic_detect(struct snd_soc_codec *codec, 1549int max98090_mic_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index ea45c355d324..ad4909e17327 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -202,300 +202,36 @@ static const struct reg_default max98095_reg_def[] = {
202 { 0xff, 0x00 }, /* FF */ 202 { 0xff, 0x00 }, /* FF */
203}; 203};
204 204
205static struct {
206 int readable;
207 int writable;
208} max98095_access[M98095_REG_CNT] = {
209 { 0x00, 0x00 }, /* 00 */
210 { 0xFF, 0x00 }, /* 01 */
211 { 0xFF, 0x00 }, /* 02 */
212 { 0xFF, 0x00 }, /* 03 */
213 { 0xFF, 0x00 }, /* 04 */
214 { 0xFF, 0x00 }, /* 05 */
215 { 0xFF, 0x00 }, /* 06 */
216 { 0xFF, 0x00 }, /* 07 */
217 { 0xFF, 0x00 }, /* 08 */
218 { 0xFF, 0x00 }, /* 09 */
219 { 0xFF, 0x00 }, /* 0A */
220 { 0xFF, 0x00 }, /* 0B */
221 { 0xFF, 0x00 }, /* 0C */
222 { 0xFF, 0x00 }, /* 0D */
223 { 0xFF, 0x00 }, /* 0E */
224 { 0xFF, 0x9F }, /* 0F */
225 { 0xFF, 0xFF }, /* 10 */
226 { 0xFF, 0xFF }, /* 11 */
227 { 0xFF, 0xFF }, /* 12 */
228 { 0xFF, 0xFF }, /* 13 */
229 { 0xFF, 0xFF }, /* 14 */
230 { 0xFF, 0xFF }, /* 15 */
231 { 0xFF, 0xFF }, /* 16 */
232 { 0xFF, 0xFF }, /* 17 */
233 { 0xFF, 0xFF }, /* 18 */
234 { 0xFF, 0xFF }, /* 19 */
235 { 0xFF, 0xFF }, /* 1A */
236 { 0xFF, 0xFF }, /* 1B */
237 { 0xFF, 0xFF }, /* 1C */
238 { 0xFF, 0xFF }, /* 1D */
239 { 0xFF, 0x77 }, /* 1E */
240 { 0xFF, 0x77 }, /* 1F */
241 { 0xFF, 0x77 }, /* 20 */
242 { 0xFF, 0x77 }, /* 21 */
243 { 0xFF, 0x77 }, /* 22 */
244 { 0xFF, 0x77 }, /* 23 */
245 { 0xFF, 0xFF }, /* 24 */
246 { 0xFF, 0x7F }, /* 25 */
247 { 0xFF, 0x31 }, /* 26 */
248 { 0xFF, 0xFF }, /* 27 */
249 { 0xFF, 0xFF }, /* 28 */
250 { 0xFF, 0xFF }, /* 29 */
251 { 0xFF, 0xF7 }, /* 2A */
252 { 0xFF, 0x2F }, /* 2B */
253 { 0xFF, 0xEF }, /* 2C */
254 { 0xFF, 0xFF }, /* 2D */
255 { 0xFF, 0xFF }, /* 2E */
256 { 0xFF, 0xFF }, /* 2F */
257 { 0xFF, 0xFF }, /* 30 */
258 { 0xFF, 0xFF }, /* 31 */
259 { 0xFF, 0xFF }, /* 32 */
260 { 0xFF, 0xFF }, /* 33 */
261 { 0xFF, 0xF7 }, /* 34 */
262 { 0xFF, 0x2F }, /* 35 */
263 { 0xFF, 0xCF }, /* 36 */
264 { 0xFF, 0xFF }, /* 37 */
265 { 0xFF, 0xFF }, /* 38 */
266 { 0xFF, 0xFF }, /* 39 */
267 { 0xFF, 0xFF }, /* 3A */
268 { 0xFF, 0xFF }, /* 3B */
269 { 0xFF, 0xFF }, /* 3C */
270 { 0xFF, 0xFF }, /* 3D */
271 { 0xFF, 0xF7 }, /* 3E */
272 { 0xFF, 0x2F }, /* 3F */
273 { 0xFF, 0xCF }, /* 40 */
274 { 0xFF, 0xFF }, /* 41 */
275 { 0xFF, 0x77 }, /* 42 */
276 { 0xFF, 0xFF }, /* 43 */
277 { 0xFF, 0xFF }, /* 44 */
278 { 0xFF, 0xFF }, /* 45 */
279 { 0xFF, 0xFF }, /* 46 */
280 { 0xFF, 0xFF }, /* 47 */
281 { 0xFF, 0xFF }, /* 48 */
282 { 0xFF, 0x0F }, /* 49 */
283 { 0xFF, 0xFF }, /* 4A */
284 { 0xFF, 0xFF }, /* 4B */
285 { 0xFF, 0x3F }, /* 4C */
286 { 0xFF, 0x3F }, /* 4D */
287 { 0xFF, 0x3F }, /* 4E */
288 { 0xFF, 0xFF }, /* 4F */
289 { 0xFF, 0x7F }, /* 50 */
290 { 0xFF, 0x7F }, /* 51 */
291 { 0xFF, 0x0F }, /* 52 */
292 { 0xFF, 0x3F }, /* 53 */
293 { 0xFF, 0x3F }, /* 54 */
294 { 0xFF, 0x3F }, /* 55 */
295 { 0xFF, 0xFF }, /* 56 */
296 { 0xFF, 0xFF }, /* 57 */
297 { 0xFF, 0xBF }, /* 58 */
298 { 0xFF, 0x1F }, /* 59 */
299 { 0xFF, 0xBF }, /* 5A */
300 { 0xFF, 0x1F }, /* 5B */
301 { 0xFF, 0xBF }, /* 5C */
302 { 0xFF, 0x3F }, /* 5D */
303 { 0xFF, 0x3F }, /* 5E */
304 { 0xFF, 0x7F }, /* 5F */
305 { 0xFF, 0x7F }, /* 60 */
306 { 0xFF, 0x47 }, /* 61 */
307 { 0xFF, 0x9F }, /* 62 */
308 { 0xFF, 0x9F }, /* 63 */
309 { 0xFF, 0x9F }, /* 64 */
310 { 0xFF, 0x9F }, /* 65 */
311 { 0xFF, 0x9F }, /* 66 */
312 { 0xFF, 0xBF }, /* 67 */
313 { 0xFF, 0xBF }, /* 68 */
314 { 0xFF, 0xFF }, /* 69 */
315 { 0xFF, 0xFF }, /* 6A */
316 { 0xFF, 0x7F }, /* 6B */
317 { 0xFF, 0xF7 }, /* 6C */
318 { 0xFF, 0xFF }, /* 6D */
319 { 0xFF, 0xFF }, /* 6E */
320 { 0xFF, 0x1F }, /* 6F */
321 { 0xFF, 0xF7 }, /* 70 */
322 { 0xFF, 0xFF }, /* 71 */
323 { 0xFF, 0xFF }, /* 72 */
324 { 0xFF, 0x1F }, /* 73 */
325 { 0xFF, 0xF7 }, /* 74 */
326 { 0xFF, 0xFF }, /* 75 */
327 { 0xFF, 0xFF }, /* 76 */
328 { 0xFF, 0x1F }, /* 77 */
329 { 0xFF, 0xF7 }, /* 78 */
330 { 0xFF, 0xFF }, /* 79 */
331 { 0xFF, 0xFF }, /* 7A */
332 { 0xFF, 0x1F }, /* 7B */
333 { 0xFF, 0xF7 }, /* 7C */
334 { 0xFF, 0xFF }, /* 7D */
335 { 0xFF, 0xFF }, /* 7E */
336 { 0xFF, 0x1F }, /* 7F */
337 { 0xFF, 0xF7 }, /* 80 */
338 { 0xFF, 0xFF }, /* 81 */
339 { 0xFF, 0xFF }, /* 82 */
340 { 0xFF, 0x1F }, /* 83 */
341 { 0xFF, 0x7F }, /* 84 */
342 { 0xFF, 0x0F }, /* 85 */
343 { 0xFF, 0xD8 }, /* 86 */
344 { 0xFF, 0xFF }, /* 87 */
345 { 0xFF, 0xEF }, /* 88 */
346 { 0xFF, 0xFE }, /* 89 */
347 { 0xFF, 0xFE }, /* 8A */
348 { 0xFF, 0xFF }, /* 8B */
349 { 0xFF, 0xFF }, /* 8C */
350 { 0xFF, 0x3F }, /* 8D */
351 { 0xFF, 0xFF }, /* 8E */
352 { 0xFF, 0x3F }, /* 8F */
353 { 0xFF, 0x8F }, /* 90 */
354 { 0xFF, 0xFF }, /* 91 */
355 { 0xFF, 0x3F }, /* 92 */
356 { 0xFF, 0xFF }, /* 93 */
357 { 0xFF, 0xFF }, /* 94 */
358 { 0xFF, 0x0F }, /* 95 */
359 { 0xFF, 0x3F }, /* 96 */
360 { 0xFF, 0x8C }, /* 97 */
361 { 0x00, 0x00 }, /* 98 */
362 { 0x00, 0x00 }, /* 99 */
363 { 0x00, 0x00 }, /* 9A */
364 { 0x00, 0x00 }, /* 9B */
365 { 0x00, 0x00 }, /* 9C */
366 { 0x00, 0x00 }, /* 9D */
367 { 0x00, 0x00 }, /* 9E */
368 { 0x00, 0x00 }, /* 9F */
369 { 0x00, 0x00 }, /* A0 */
370 { 0x00, 0x00 }, /* A1 */
371 { 0x00, 0x00 }, /* A2 */
372 { 0x00, 0x00 }, /* A3 */
373 { 0x00, 0x00 }, /* A4 */
374 { 0x00, 0x00 }, /* A5 */
375 { 0x00, 0x00 }, /* A6 */
376 { 0x00, 0x00 }, /* A7 */
377 { 0x00, 0x00 }, /* A8 */
378 { 0x00, 0x00 }, /* A9 */
379 { 0x00, 0x00 }, /* AA */
380 { 0x00, 0x00 }, /* AB */
381 { 0x00, 0x00 }, /* AC */
382 { 0x00, 0x00 }, /* AD */
383 { 0x00, 0x00 }, /* AE */
384 { 0x00, 0x00 }, /* AF */
385 { 0x00, 0x00 }, /* B0 */
386 { 0x00, 0x00 }, /* B1 */
387 { 0x00, 0x00 }, /* B2 */
388 { 0x00, 0x00 }, /* B3 */
389 { 0x00, 0x00 }, /* B4 */
390 { 0x00, 0x00 }, /* B5 */
391 { 0x00, 0x00 }, /* B6 */
392 { 0x00, 0x00 }, /* B7 */
393 { 0x00, 0x00 }, /* B8 */
394 { 0x00, 0x00 }, /* B9 */
395 { 0x00, 0x00 }, /* BA */
396 { 0x00, 0x00 }, /* BB */
397 { 0x00, 0x00 }, /* BC */
398 { 0x00, 0x00 }, /* BD */
399 { 0x00, 0x00 }, /* BE */
400 { 0x00, 0x00 }, /* BF */
401 { 0x00, 0x00 }, /* C0 */
402 { 0x00, 0x00 }, /* C1 */
403 { 0x00, 0x00 }, /* C2 */
404 { 0x00, 0x00 }, /* C3 */
405 { 0x00, 0x00 }, /* C4 */
406 { 0x00, 0x00 }, /* C5 */
407 { 0x00, 0x00 }, /* C6 */
408 { 0x00, 0x00 }, /* C7 */
409 { 0x00, 0x00 }, /* C8 */
410 { 0x00, 0x00 }, /* C9 */
411 { 0x00, 0x00 }, /* CA */
412 { 0x00, 0x00 }, /* CB */
413 { 0x00, 0x00 }, /* CC */
414 { 0x00, 0x00 }, /* CD */
415 { 0x00, 0x00 }, /* CE */
416 { 0x00, 0x00 }, /* CF */
417 { 0x00, 0x00 }, /* D0 */
418 { 0x00, 0x00 }, /* D1 */
419 { 0x00, 0x00 }, /* D2 */
420 { 0x00, 0x00 }, /* D3 */
421 { 0x00, 0x00 }, /* D4 */
422 { 0x00, 0x00 }, /* D5 */
423 { 0x00, 0x00 }, /* D6 */
424 { 0x00, 0x00 }, /* D7 */
425 { 0x00, 0x00 }, /* D8 */
426 { 0x00, 0x00 }, /* D9 */
427 { 0x00, 0x00 }, /* DA */
428 { 0x00, 0x00 }, /* DB */
429 { 0x00, 0x00 }, /* DC */
430 { 0x00, 0x00 }, /* DD */
431 { 0x00, 0x00 }, /* DE */
432 { 0x00, 0x00 }, /* DF */
433 { 0x00, 0x00 }, /* E0 */
434 { 0x00, 0x00 }, /* E1 */
435 { 0x00, 0x00 }, /* E2 */
436 { 0x00, 0x00 }, /* E3 */
437 { 0x00, 0x00 }, /* E4 */
438 { 0x00, 0x00 }, /* E5 */
439 { 0x00, 0x00 }, /* E6 */
440 { 0x00, 0x00 }, /* E7 */
441 { 0x00, 0x00 }, /* E8 */
442 { 0x00, 0x00 }, /* E9 */
443 { 0x00, 0x00 }, /* EA */
444 { 0x00, 0x00 }, /* EB */
445 { 0x00, 0x00 }, /* EC */
446 { 0x00, 0x00 }, /* ED */
447 { 0x00, 0x00 }, /* EE */
448 { 0x00, 0x00 }, /* EF */
449 { 0x00, 0x00 }, /* F0 */
450 { 0x00, 0x00 }, /* F1 */
451 { 0x00, 0x00 }, /* F2 */
452 { 0x00, 0x00 }, /* F3 */
453 { 0x00, 0x00 }, /* F4 */
454 { 0x00, 0x00 }, /* F5 */
455 { 0x00, 0x00 }, /* F6 */
456 { 0x00, 0x00 }, /* F7 */
457 { 0x00, 0x00 }, /* F8 */
458 { 0x00, 0x00 }, /* F9 */
459 { 0x00, 0x00 }, /* FA */
460 { 0x00, 0x00 }, /* FB */
461 { 0x00, 0x00 }, /* FC */
462 { 0x00, 0x00 }, /* FD */
463 { 0x00, 0x00 }, /* FE */
464 { 0xFF, 0x00 }, /* FF */
465};
466
467static bool max98095_readable(struct device *dev, unsigned int reg) 205static bool max98095_readable(struct device *dev, unsigned int reg)
468{ 206{
469 if (reg >= M98095_REG_CNT) 207 switch (reg) {
470 return 0; 208 case M98095_001_HOST_INT_STS ... M98095_097_PWR_SYS:
471 return max98095_access[reg].readable != 0; 209 case M98095_0FF_REV_ID:
210 return true;
211 default:
212 return false;
213 }
472} 214}
473 215
474static bool max98095_volatile(struct device *dev, unsigned int reg) 216static bool max98095_writeable(struct device *dev, unsigned int reg)
475{ 217{
476 if (reg > M98095_REG_MAX_CACHED)
477 return 1;
478
479 switch (reg) { 218 switch (reg) {
480 case M98095_000_HOST_DATA: 219 case M98095_00F_HOST_CFG ... M98095_097_PWR_SYS:
481 case M98095_001_HOST_INT_STS: 220 return true;
482 case M98095_002_HOST_RSP_STS: 221 default:
483 case M98095_003_HOST_CMD_STS: 222 return false;
484 case M98095_004_CODEC_STS:
485 case M98095_005_DAI1_ALC_STS:
486 case M98095_006_DAI2_ALC_STS:
487 case M98095_007_JACK_AUTO_STS:
488 case M98095_008_JACK_MANUAL_STS:
489 case M98095_009_JACK_VBAT_STS:
490 case M98095_00A_ACC_ADC_STS:
491 case M98095_00B_MIC_NG_AGC_STS:
492 case M98095_00C_SPK_L_VOLT_STS:
493 case M98095_00D_SPK_R_VOLT_STS:
494 case M98095_00E_TEMP_SENSOR_STS:
495 return 1;
496 } 223 }
224}
497 225
498 return 0; 226static bool max98095_volatile(struct device *dev, unsigned int reg)
227{
228 switch (reg) {
229 case M98095_000_HOST_DATA ... M98095_00E_TEMP_SENSOR_STS:
230 case M98095_REG_MAX_CACHED + 1 ... M98095_0FF_REV_ID:
231 return true;
232 default:
233 return false;
234 }
499} 235}
500 236
501static const struct regmap_config max98095_regmap = { 237static const struct regmap_config max98095_regmap = {
@@ -508,6 +244,7 @@ static const struct regmap_config max98095_regmap = {
508 .cache_type = REGCACHE_RBTREE, 244 .cache_type = REGCACHE_RBTREE,
509 245
510 .readable_reg = max98095_readable, 246 .readable_reg = max98095_readable,
247 .writeable_reg = max98095_writeable,
511 .volatile_reg = max98095_volatile, 248 .volatile_reg = max98095_volatile,
512}; 249};
513 250
@@ -1653,10 +1390,13 @@ static int max98095_set_bias_level(struct snd_soc_codec *codec,
1653 if (IS_ERR(max98095->mclk)) 1390 if (IS_ERR(max98095->mclk))
1654 break; 1391 break;
1655 1392
1656 if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) 1393 if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
1657 clk_disable_unprepare(max98095->mclk); 1394 clk_disable_unprepare(max98095->mclk);
1658 else 1395 } else {
1659 clk_prepare_enable(max98095->mclk); 1396 ret = clk_prepare_enable(max98095->mclk);
1397 if (ret)
1398 return ret;
1399 }
1660 break; 1400 break;
1661 1401
1662 case SND_SOC_BIAS_STANDBY: 1402 case SND_SOC_BIAS_STANDBY:
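
In max98095.c the ~260-entry static access table is dropped in favour of three small predicate callbacks wired into the regmap_config (readable, writeable, volatile); GCC case ranges keep the register windows compact. A generic sketch of the same shape, with made-up CHIP_* register names:

static bool chip_readable_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case CHIP_FIRST_STATUS ... CHIP_LAST_CTRL:	/* GCC case range */
	case CHIP_REV_ID:
		return true;
	default:
		return false;
	}
}

static bool chip_volatile_reg(struct device *dev, unsigned int reg)
{
	/* Status registers must always be read from the hardware. */
	switch (reg) {
	case CHIP_FIRST_STATUS ... CHIP_LAST_STATUS:
	case CHIP_REV_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config chip_regmap = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= CHIP_REV_ID,
	.cache_type	= REGCACHE_RBTREE,
	.readable_reg	= chip_readable_reg,
	.volatile_reg	= chip_volatile_reg,
};
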
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 3a2fda08a893..f5e3dce2633a 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -31,6 +31,9 @@ static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
31{ 31{
32 struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai); 32 struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai);
33 33
34 if (!sdmode)
35 return 0;
36
34 switch (cmd) { 37 switch (cmd) {
35 case SNDRV_PCM_TRIGGER_START: 38 case SNDRV_PCM_TRIGGER_START:
36 case SNDRV_PCM_TRIGGER_RESUME: 39 case SNDRV_PCM_TRIGGER_RESUME:
@@ -48,24 +51,21 @@ static int max98357a_daiops_trigger(struct snd_pcm_substream *substream,
48} 51}
49 52
50static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = { 53static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = {
51 SND_SOC_DAPM_DAC("SDMode", NULL, SND_SOC_NOPM, 0, 0),
52 SND_SOC_DAPM_OUTPUT("Speaker"), 54 SND_SOC_DAPM_OUTPUT("Speaker"),
53}; 55};
54 56
55static const struct snd_soc_dapm_route max98357a_dapm_routes[] = { 57static const struct snd_soc_dapm_route max98357a_dapm_routes[] = {
56 {"Speaker", NULL, "SDMode"}, 58 {"Speaker", NULL, "HiFi Playback"},
57}; 59};
58 60
59static int max98357a_codec_probe(struct snd_soc_codec *codec) 61static int max98357a_codec_probe(struct snd_soc_codec *codec)
60{ 62{
61 struct gpio_desc *sdmode; 63 struct gpio_desc *sdmode;
62 64
63 sdmode = devm_gpiod_get(codec->dev, "sdmode", GPIOD_OUT_LOW); 65 sdmode = devm_gpiod_get_optional(codec->dev, "sdmode", GPIOD_OUT_LOW);
64 if (IS_ERR(sdmode)) { 66 if (IS_ERR(sdmode))
65 dev_err(codec->dev, "%s() unable to get sdmode GPIO: %ld\n",
66 __func__, PTR_ERR(sdmode));
67 return PTR_ERR(sdmode); 67 return PTR_ERR(sdmode);
68 } 68
69 snd_soc_codec_set_drvdata(codec, sdmode); 69 snd_soc_codec_set_drvdata(codec, sdmode);
70 70
71 return 0; 71 return 0;
@@ -79,7 +79,7 @@ static struct snd_soc_codec_driver max98357a_codec_driver = {
79 .num_dapm_routes = ARRAY_SIZE(max98357a_dapm_routes), 79 .num_dapm_routes = ARRAY_SIZE(max98357a_dapm_routes),
80}; 80};
81 81
82static struct snd_soc_dai_ops max98357a_dai_ops = { 82static const struct snd_soc_dai_ops max98357a_dai_ops = {
83 .trigger = max98357a_daiops_trigger, 83 .trigger = max98357a_daiops_trigger,
84}; 84};
85 85
@@ -104,15 +104,8 @@ static struct snd_soc_dai_driver max98357a_dai_driver = {
104 104
105static int max98357a_platform_probe(struct platform_device *pdev) 105static int max98357a_platform_probe(struct platform_device *pdev)
106{ 106{
107 int ret; 107 return snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
108
109 ret = snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
110 &max98357a_dai_driver, 1); 108 &max98357a_dai_driver, 1);
111 if (ret)
112 dev_err(&pdev->dev, "%s() error registering codec driver: %d\n",
113 __func__, ret);
114
115 return ret;
116} 109}
117 110
118static int max98357a_platform_remove(struct platform_device *pdev) 111static int max98357a_platform_remove(struct platform_device *pdev)
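
max98357a now treats the sdmode GPIO as optional: devm_gpiod_get_optional() returns NULL rather than an error when the property is absent, so the trigger handler has to tolerate a NULL descriptor. A sketch of that shape (placeholder function name, not the exact driver code):

static int example_trigger(struct snd_pcm_substream *substream, int cmd,
			   struct snd_soc_dai *dai)
{
	struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai);

	/* No sdmode line wired up: nothing to switch. */
	if (!sdmode)
		return 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		gpiod_set_value(sdmode, 1);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		gpiod_set_value(sdmode, 0);
		break;
	}

	return 0;
}
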
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 7692623ad5c3..fb448dde018d 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -20,9 +20,7 @@
20 20
21#include "max9877.h" 21#include "max9877.h"
22 22
23static struct regmap *regmap; 23static const struct reg_default max9877_regs[] = {
24
25static struct reg_default max9877_regs[] = {
26 { 0, 0x40 }, 24 { 0, 0x40 },
27 { 1, 0x00 }, 25 { 1, 0x00 },
28 { 2, 0x00 }, 26 { 2, 0x00 },
@@ -123,7 +121,7 @@ static const struct snd_soc_dapm_route max9877_dapm_routes[] = {
123 { "HPR", NULL, "SHDN" }, 121 { "HPR", NULL, "SHDN" },
124}; 122};
125 123
126static const struct snd_soc_codec_driver max9877_codec = { 124static const struct snd_soc_component_driver max9877_component_driver = {
127 .controls = max9877_controls, 125 .controls = max9877_controls,
128 .num_controls = ARRAY_SIZE(max9877_controls), 126 .num_controls = ARRAY_SIZE(max9877_controls),
129 127
@@ -145,6 +143,7 @@ static const struct regmap_config max9877_regmap = {
145static int max9877_i2c_probe(struct i2c_client *client, 143static int max9877_i2c_probe(struct i2c_client *client,
146 const struct i2c_device_id *id) 144 const struct i2c_device_id *id)
147{ 145{
146 struct regmap *regmap;
148 int i; 147 int i;
149 148
150 regmap = devm_regmap_init_i2c(client, &max9877_regmap); 149 regmap = devm_regmap_init_i2c(client, &max9877_regmap);
@@ -155,14 +154,8 @@ static int max9877_i2c_probe(struct i2c_client *client,
155 for (i = 0; i < ARRAY_SIZE(max9877_regs); i++) 154 for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
156 regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def); 155 regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def);
157 156
158 return snd_soc_register_codec(&client->dev, &max9877_codec, NULL, 0); 157 return devm_snd_soc_register_component(&client->dev,
159} 158 &max9877_component_driver, NULL, 0);
160
161static int max9877_i2c_remove(struct i2c_client *client)
162{
163 snd_soc_unregister_codec(&client->dev);
164
165 return 0;
166} 159}
167 160
168static const struct i2c_device_id max9877_i2c_id[] = { 161static const struct i2c_device_id max9877_i2c_id[] = {
@@ -176,7 +169,6 @@ static struct i2c_driver max9877_i2c_driver = {
176 .name = "max9877", 169 .name = "max9877",
177 }, 170 },
178 .probe = max9877_i2c_probe, 171 .probe = max9877_i2c_probe,
179 .remove = max9877_i2c_remove,
180 .id_table = max9877_i2c_id, 172 .id_table = max9877_i2c_id,
181}; 173};
182 174
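
The max9877 conversion replaces snd_soc_register_codec() with devm_snd_soc_register_component(), which is why the i2c .remove callback disappears: the devm-managed component is unregistered automatically when the device goes away. A minimal sketch of that probe, using a hypothetical chip_* driver:

static int chip_i2c_probe(struct i2c_client *client,
			  const struct i2c_device_id *id)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(client, &chip_regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return devm_snd_soc_register_component(&client->dev,
					       &chip_component_driver,
					       NULL, 0);
}

static struct i2c_driver chip_i2c_driver = {
	.driver		= { .name = "chip" },
	.probe		= chip_i2c_probe,
	.id_table	= chip_i2c_id,
	/* No .remove: devm releases the component and regmap for us. */
};
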
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index ce551eecbf95..ebb648aea8c6 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -271,8 +271,6 @@ static inline int max98925_rate_value(struct snd_soc_codec *codec,
271 break; 271 break;
272 } 272 }
273 } 273 }
274 dev_dbg(codec->dev, "%s: sample rate is %d, returning %d\n",
275 __func__, rate_table[i].rate, *value);
276 return ret; 274 return ret;
277} 275}
278 276
@@ -523,7 +521,6 @@ static int max98925_probe(struct snd_soc_codec *codec)
523 struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec); 521 struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
524 522
525 max98925->codec = codec; 523 max98925->codec = codec;
526 codec->control_data = max98925->regmap;
527 regmap_write(max98925->regmap, MAX98925_GLOBAL_ENABLE, 0x00); 524 regmap_write(max98925->regmap, MAX98925_GLOBAL_ENABLE, 0x00);
528 /* It's not the default but we need to set DAI_DLY */ 525 /* It's not the default but we need to set DAI_DLY */
529 regmap_write(max98925->regmap, 526 regmap_write(max98925->regmap,
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 3d44fc50e4d0..3e770cbe7f0f 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -650,14 +650,14 @@ static int mc13783_remove(struct snd_soc_codec *codec)
650#define MC13783_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ 650#define MC13783_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
651 SNDRV_PCM_FMTBIT_S24_LE) 651 SNDRV_PCM_FMTBIT_S24_LE)
652 652
653static struct snd_soc_dai_ops mc13783_ops_dac = { 653static const struct snd_soc_dai_ops mc13783_ops_dac = {
654 .hw_params = mc13783_pcm_hw_params_dac, 654 .hw_params = mc13783_pcm_hw_params_dac,
655 .set_fmt = mc13783_set_fmt_async, 655 .set_fmt = mc13783_set_fmt_async,
656 .set_sysclk = mc13783_set_sysclk_dac, 656 .set_sysclk = mc13783_set_sysclk_dac,
657 .set_tdm_slot = mc13783_set_tdm_slot_dac, 657 .set_tdm_slot = mc13783_set_tdm_slot_dac,
658}; 658};
659 659
660static struct snd_soc_dai_ops mc13783_ops_codec = { 660static const struct snd_soc_dai_ops mc13783_ops_codec = {
661 .hw_params = mc13783_pcm_hw_params_codec, 661 .hw_params = mc13783_pcm_hw_params_codec,
662 .set_fmt = mc13783_set_fmt_async, 662 .set_fmt = mc13783_set_fmt_async,
663 .set_sysclk = mc13783_set_sysclk_codec, 663 .set_sysclk = mc13783_set_sysclk_codec,
@@ -698,7 +698,7 @@ static struct snd_soc_dai_driver mc13783_dai_async[] = {
698 }, 698 },
699}; 699};
700 700
701static struct snd_soc_dai_ops mc13783_ops_sync = { 701static const struct snd_soc_dai_ops mc13783_ops_sync = {
702 .hw_params = mc13783_pcm_hw_params_sync, 702 .hw_params = mc13783_pcm_hw_params_sync,
703 .set_fmt = mc13783_set_fmt_sync, 703 .set_fmt = mc13783_set_fmt_sync,
704 .set_sysclk = mc13783_set_sysclk_sync, 704 .set_sysclk = mc13783_set_sysclk_sync,
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index bda2bd751be4..f561c78b9e0e 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -199,7 +199,7 @@ static const struct clk_coeff coeff_div[] = {
199 {12288000, 48000, 0xc, 0x0, 0x30, 0x0, 0x4}, 199 {12288000, 48000, 0xc, 0x0, 0x30, 0x0, 0x4},
200}; 200};
201 201
202static struct reg_default ml26124_reg[] = { 202static const struct reg_default ml26124_reg[] = {
203 /* CLOCK control Register */ 203 /* CLOCK control Register */
204 {0x00, 0x00 }, /* Sampling Rate */ 204 {0x00, 0x00 }, /* Sampling Rate */
205 {0x02, 0x00}, /* PLL NL */ 205 {0x02, 0x00}, /* PLL NL */
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index b2c990f08aa3..58325234285c 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -95,17 +95,22 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
95 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec); 95 struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
96 int i = 0, val = -1, enable = 0; 96 int i = 0, val = -1, enable = 0;
97 97
98 if (priv->deemph) 98 if (priv->deemph) {
99 for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++) 99 for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++) {
100 if (pcm1681_deemph[i] == priv->rate) 100 if (pcm1681_deemph[i] == priv->rate) {
101 val = i; 101 val = i;
102 break;
103 }
104 }
105 }
102 106
103 if (val != -1) { 107 if (val != -1) {
104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, 108 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
105 PCM1681_DEEMPH_RATE_MASK, val); 109 PCM1681_DEEMPH_RATE_MASK, val << 3);
106 enable = 1; 110 enable = 1;
107 } else 111 } else {
108 enable = 0; 112 enable = 0;
113 }
109 114
110 /* enable/disable deemphasis functionality */ 115 /* enable/disable deemphasis functionality */
111 return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, 116 return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
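
The pcm1681 deemphasis fix does two things: the lookup loop now breaks on the first matching rate instead of scanning on, and the selected index is shifted into position (val << 3) before being applied under PCM1681_DEEMPH_RATE_MASK. The general rule, as a small sketch with placeholder field macros:

/* Write an index into a multi-bit register field: the value must be
 * shifted to the field's bit position before regmap applies the mask.
 * CHIP_REG / CHIP_FIELD_MASK / CHIP_FIELD_SHIFT are placeholders. */
static int chip_set_field(struct regmap *map, unsigned int val)
{
	return regmap_update_bits(map, CHIP_REG, CHIP_FIELD_MASK,
				  val << CHIP_FIELD_SHIFT);
}
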
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 56650d6c2f53..aca479fa7670 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -11,38 +11,98 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/regmap.h>
14 15
15#include "rl6231.h" 16#include "rl6231.h"
16 17
17/** 18/**
18 * rl6231_calc_dmic_clk - Calculate the parameter of dmic. 19 * rl6231_get_pre_div - Return the value of pre divider.
20 *
21 * @map: map for setting.
22 * @reg: register.
23 * @sft: shift.
24 *
25 * Return the value of pre divider from given register value.
26 * Return negative error code for unexpected register value.
27 */
28int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft)
29{
30 int pd, val;
31
32 regmap_read(map, reg, &val);
33
34 val = (val >> sft) & 0x7;
35
36 switch (val) {
37 case 0:
38 case 1:
39 case 2:
40 case 3:
41 pd = val + 1;
42 break;
43 case 4:
44 pd = 6;
45 break;
46 case 5:
47 pd = 8;
48 break;
49 case 6:
50 pd = 12;
51 break;
52 case 7:
53 pd = 16;
54 break;
55 default:
56 pd = -EINVAL;
57 break;
58 }
59
60 return pd;
61}
62EXPORT_SYMBOL_GPL(rl6231_get_pre_div);
63
64/**
65 * rl6231_calc_dmic_clk - Calculate the frequency divider parameter of dmic.
19 * 66 *
20 * @rate: base clock rate. 67 * @rate: base clock rate.
21 * 68 *
22 * Choose dmic clock between 1MHz and 3MHz. 69 * Choose divider parameter that gives the highest possible DMIC frequency in
23 * It is better for clock to approximate 3MHz. 70 * 1MHz - 3MHz range.
24 */ 71 */
25int rl6231_calc_dmic_clk(int rate) 72int rl6231_calc_dmic_clk(int rate)
26{ 73{
27 int div[] = {2, 3, 4, 6, 8, 12}, idx = -EINVAL; 74 int div[] = {2, 3, 4, 6, 8, 12};
28 int i, red, bound, temp; 75 int i;
76
77 if (rate < 1000000 * div[0]) {
78 pr_warn("Base clock rate %d is too low\n", rate);
79 return -EINVAL;
80 }
29 81
30 red = 3000000 * 12;
31 for (i = 0; i < ARRAY_SIZE(div); i++) { 82 for (i = 0; i < ARRAY_SIZE(div); i++) {
32 bound = div[i] * 3000000; 83 /* find divider that gives DMIC frequency below 3MHz */
33 if (rate > bound) 84 if (3000000 * div[i] >= rate)
34 continue; 85 return i;
35 temp = bound - rate;
36 if (temp < red) {
37 red = temp;
38 idx = i;
39 }
40 } 86 }
41 87
42 return idx; 88 pr_warn("Base clock rate %d is too high\n", rate);
89 return -EINVAL;
43} 90}
44EXPORT_SYMBOL_GPL(rl6231_calc_dmic_clk); 91EXPORT_SYMBOL_GPL(rl6231_calc_dmic_clk);
45 92
93struct pll_calc_map {
94 unsigned int pll_in;
95 unsigned int pll_out;
96 int k;
97 int n;
98 int m;
99 bool m_bp;
100};
101
102static const struct pll_calc_map pll_preset_table[] = {
103 {19200000, 24576000, 3, 30, 3, false},
104};
105
46/** 106/**
47 * rl6231_pll_calc - Calcualte PLL M/N/K code. 107 * rl6231_pll_calc - Calcualte PLL M/N/K code.
48 * @freq_in: external clock provided to codec. 108 * @freq_in: external clock provided to codec.
@@ -57,7 +117,7 @@ int rl6231_pll_calc(const unsigned int freq_in,
57 const unsigned int freq_out, struct rl6231_pll_code *pll_code) 117 const unsigned int freq_out, struct rl6231_pll_code *pll_code)
58{ 118{
59 int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX; 119 int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
60 int k, red, n_t, pll_out, in_t, out_t; 120 int i, k, red, n_t, pll_out, in_t, out_t;
61 int n = 0, m = 0, m_t = 0; 121 int n = 0, m = 0, m_t = 0;
62 int red_t = abs(freq_out - freq_in); 122 int red_t = abs(freq_out - freq_in);
63 bool bypass = false; 123 bool bypass = false;
@@ -65,6 +125,18 @@ int rl6231_pll_calc(const unsigned int freq_in,
65 if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in) 125 if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
66 return -EINVAL; 126 return -EINVAL;
67 127
128 for (i = 0; i < ARRAY_SIZE(pll_preset_table); i++) {
129 if (freq_in == pll_preset_table[i].pll_in &&
130 freq_out == pll_preset_table[i].pll_out) {
131 k = pll_preset_table[i].k;
132 m = pll_preset_table[i].m;
133 n = pll_preset_table[i].n;
134 bypass = pll_preset_table[i].m_bp;
135 pr_debug("Use preset PLL parameter table\n");
136 goto code_find;
137 }
138 }
139
68 k = 100000000 / freq_out - 2; 140 k = 100000000 / freq_out - 2;
69 if (k > RL6231_PLL_K_MAX) 141 if (k > RL6231_PLL_K_MAX)
70 k = RL6231_PLL_K_MAX; 142 k = RL6231_PLL_K_MAX;
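
The reworked rl6231_calc_dmic_clk() walks the divider table {2, 3, 4, 6, 8, 12} and returns the first index whose divider keeps the DMIC clock at or below 3 MHz, which is also the highest clock inside the 1-3 MHz window; base rates below 2 MHz or above 36 MHz now warn and return -EINVAL. For example, with a 12.288 MHz base clock the loop stops at index 3 (divider 6), giving a 2.048 MHz DMIC clock. A standalone sketch of the selection logic:

static const int dmic_div[] = { 2, 3, 4, 6, 8, 12 };

/* Return the table index of the chosen divider, or -1 if the base
 * clock cannot be brought into the 1-3 MHz window. */
static int pick_dmic_div_index(int rate)
{
	unsigned int i;

	if (rate < 1000000 * dmic_div[0])
		return -1;				/* too low */

	for (i = 0; i < sizeof(dmic_div) / sizeof(dmic_div[0]); i++)
		if (3000000 * dmic_div[i] >= rate)
			return i;			/* 12288000 -> 3 */

	return -1;					/* too high */
}
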
diff --git a/sound/soc/codecs/rl6231.h b/sound/soc/codecs/rl6231.h
index 0f7b057ed736..4c77b441fba2 100644
--- a/sound/soc/codecs/rl6231.h
+++ b/sound/soc/codecs/rl6231.h
@@ -30,5 +30,6 @@ int rl6231_calc_dmic_clk(int rate);
30int rl6231_pll_calc(const unsigned int freq_in, 30int rl6231_pll_calc(const unsigned int freq_in,
31 const unsigned int freq_out, struct rl6231_pll_code *pll_code); 31 const unsigned int freq_out, struct rl6231_pll_code *pll_code);
32int rl6231_get_clk_info(int sclk, int rate); 32int rl6231_get_clk_info(int sclk, int rate);
33int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft);
33 34
34#endif /* __RL6231_H__ */ 35#endif /* __RL6231_H__ */
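
rl6231_pll_calc() also gains a small preset table that is consulted before the iterative M/N/K search; the single entry covers the common 19.2 MHz to 24.576 MHz case. Assuming the usual Realtek PLL relation Fout = Fin * (N + 2) / ((M + 2) * (K + 2)) (an assumption here, worth confirming against the datasheet), the preset is exact:

/* Quick sanity check of the preset entry under the assumed formula. */
static unsigned long pll_out_hz(unsigned long fin, int n, int m, int k)
{
	return fin * (n + 2) / ((m + 2) * (k + 2));
}

/* pll_out_hz(19200000, 30, 3, 3) == 24576000, matching the table entry. */
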
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 83029e461309..bd9365885f73 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -38,7 +38,7 @@
38#define RT288_VENDOR_ID 0x10ec0288 38#define RT288_VENDOR_ID 0x10ec0288
39 39
40struct rt286_priv { 40struct rt286_priv {
41 struct reg_default *index_cache; 41 const struct reg_default *index_cache;
42 int index_cache_size; 42 int index_cache_size;
43 struct regmap *regmap; 43 struct regmap *regmap;
44 struct snd_soc_codec *codec; 44 struct snd_soc_codec *codec;
@@ -50,7 +50,7 @@ struct rt286_priv {
50 int clk_id; 50 int clk_id;
51}; 51};
52 52
53static struct reg_default rt286_index_def[] = { 53static const struct reg_default rt286_index_def[] = {
54 { 0x01, 0xaaaa }, 54 { 0x01, 0xaaaa },
55 { 0x02, 0x8aaa }, 55 { 0x02, 0x8aaa },
56 { 0x03, 0x0002 }, 56 { 0x03, 0x0002 },
@@ -1108,7 +1108,7 @@ static const struct acpi_device_id rt286_acpi_match[] = {
1108}; 1108};
1109MODULE_DEVICE_TABLE(acpi, rt286_acpi_match); 1109MODULE_DEVICE_TABLE(acpi, rt286_acpi_match);
1110 1110
1111static struct dmi_system_id force_combo_jack_table[] = { 1111static const struct dmi_system_id force_combo_jack_table[] = {
1112 { 1112 {
1113 .ident = "Intel Wilson Beach", 1113 .ident = "Intel Wilson Beach",
1114 .matches = { 1114 .matches = {
@@ -1118,7 +1118,7 @@ static struct dmi_system_id force_combo_jack_table[] = {
1118 { } 1118 { }
1119}; 1119};
1120 1120
1121static struct dmi_system_id dmi_dell_dino[] = { 1121static const struct dmi_system_id dmi_dell_dino[] = {
1122 { 1122 {
1123 .ident = "Dell Dino", 1123 .ident = "Dell Dino",
1124 .matches = { 1124 .matches = {
@@ -1157,7 +1157,7 @@ static int rt286_i2c_probe(struct i2c_client *i2c,
1157 } 1157 }
1158 if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) { 1158 if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
1159 dev_err(&i2c->dev, 1159 dev_err(&i2c->dev,
1160 "Device with ID register %x is not rt286\n", val); 1160 "Device with ID register %#x is not rt286\n", val);
1161 return -ENODEV; 1161 return -ENODEV;
1162 } 1162 }
1163 1163
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index 75e5679dfef8..3c2f0f8d6266 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -1144,8 +1144,6 @@ static int rt298_i2c_probe(struct i2c_client *i2c,
1144 const struct acpi_device_id *acpiid; 1144 const struct acpi_device_id *acpiid;
1145 int i, ret; 1145 int i, ret;
1146 1146
1147 pr_info("%s\n", __func__);
1148
1149 rt298 = devm_kzalloc(&i2c->dev, sizeof(*rt298), 1147 rt298 = devm_kzalloc(&i2c->dev, sizeof(*rt298),
1150 GFP_KERNEL); 1148 GFP_KERNEL);
1151 if (NULL == rt298) 1149 if (NULL == rt298)
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 4a780efdd728..b4139d3da334 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -51,7 +51,7 @@ static const struct regmap_range_cfg rt5640_ranges[] = {
51 .window_len = 0x1, }, 51 .window_len = 0x1, },
52}; 52};
53 53
54static const struct reg_default init_list[] = { 54static const struct reg_sequence init_list[] = {
55 {RT5640_PR_BASE + 0x3d, 0x3600}, 55 {RT5640_PR_BASE + 0x3d, 0x3600},
56 {RT5640_PR_BASE + 0x12, 0x0aa8}, 56 {RT5640_PR_BASE + 0x12, 0x0aa8},
57 {RT5640_PR_BASE + 0x14, 0x0aaa}, 57 {RT5640_PR_BASE + 0x14, 0x0aaa},
@@ -459,10 +459,11 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
459{ 459{
460 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); 460 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
461 struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec); 461 struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
462 int idx = -EINVAL; 462 int idx, rate;
463
464 idx = rl6231_calc_dmic_clk(rt5640->sysclk);
465 463
464 rate = rt5640->sysclk / rl6231_get_pre_div(rt5640->regmap,
465 RT5640_ADDA_CLK1, RT5640_I2S_PD1_SFT);
466 idx = rl6231_calc_dmic_clk(rate);
466 if (idx < 0) 467 if (idx < 0)
467 dev_err(codec->dev, "Failed to set DMIC clock\n"); 468 dev_err(codec->dev, "Failed to set DMIC clock\n");
468 else 469 else
@@ -984,6 +985,35 @@ static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
984 return 0; 985 return 0;
985} 986}
986 987
988static int rt5640_lout_event(struct snd_soc_dapm_widget *w,
989 struct snd_kcontrol *kcontrol, int event)
990{
991 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
992
993 switch (event) {
994 case SND_SOC_DAPM_POST_PMU:
995 hp_amp_power_on(codec);
996 snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
997 RT5640_PWR_LM, RT5640_PWR_LM);
998 snd_soc_update_bits(codec, RT5640_OUTPUT,
999 RT5640_L_MUTE | RT5640_R_MUTE, 0);
1000 break;
1001
1002 case SND_SOC_DAPM_PRE_PMD:
1003 snd_soc_update_bits(codec, RT5640_OUTPUT,
1004 RT5640_L_MUTE | RT5640_R_MUTE,
1005 RT5640_L_MUTE | RT5640_R_MUTE);
1006 snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
1007 RT5640_PWR_LM, 0);
1008 break;
1009
1010 default:
1011 return 0;
1012 }
1013
1014 return 0;
1015}
1016
987static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w, 1017static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
988 struct snd_kcontrol *kcontrol, int event) 1018 struct snd_kcontrol *kcontrol, int event)
989{ 1019{
@@ -1179,13 +1209,16 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
1179 0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)), 1209 0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
1180 SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0, 1210 SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
1181 0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)), 1211 0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
1182 SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0, 1212 SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
1183 rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)), 1213 rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
1184 SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM, 1214 SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
1185 0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU), 1215 0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
1186 SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, 1216 SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
1187 rt5640_hp_event, 1217 rt5640_hp_event,
1188 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), 1218 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
1219 SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
1220 rt5640_lout_event,
1221 SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
1189 SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1, 1222 SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
1190 RT5640_PWR_HP_L_BIT, 0, NULL, 0), 1223 RT5640_PWR_HP_L_BIT, 0, NULL, 0),
1191 SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1, 1224 SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
@@ -1500,8 +1533,10 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
1500 {"HP R Playback", "Switch", "HP Amp"}, 1533 {"HP R Playback", "Switch", "HP Amp"},
1501 {"HPOL", NULL, "HP L Playback"}, 1534 {"HPOL", NULL, "HP L Playback"},
1502 {"HPOR", NULL, "HP R Playback"}, 1535 {"HPOR", NULL, "HP R Playback"},
1503 {"LOUTL", NULL, "LOUT MIX"}, 1536
1504 {"LOUTR", NULL, "LOUT MIX"}, 1537 {"LOUT amp", NULL, "LOUT MIX"},
1538 {"LOUTL", NULL, "LOUT amp"},
1539 {"LOUTR", NULL, "LOUT amp"},
1505}; 1540};
1506 1541
1507static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = { 1542static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = {
@@ -2207,7 +2242,7 @@ static int rt5640_i2c_probe(struct i2c_client *i2c,
2207 regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val); 2242 regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val);
2208 if (val != RT5640_DEVICE_ID) { 2243 if (val != RT5640_DEVICE_ID) {
2209 dev_err(&i2c->dev, 2244 dev_err(&i2c->dev,
2210 "Device with ID register %x is not rt5640/39\n", val); 2245 "Device with ID register %#x is not rt5640/39\n", val);
2211 return -ENODEV; 2246 return -ENODEV;
2212 } 2247 }
2213 2248
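
On rt5640 the LOUT path stops toggling RT5640_PWR_LM from the mixer widget itself; a new "LOUT amp" PGA widget powers the amplifier and unmutes on SND_SOC_DAPM_POST_PMU, and mutes before dropping power on SND_SOC_DAPM_PRE_PMD, so the outputs are never unmuted while the amp is off. The general widget pattern, sketched with placeholder register names:

static int chip_out_amp_event(struct snd_soc_dapm_widget *w,
			      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* Output is now routed: power the amp, then unmute. */
		snd_soc_update_bits(codec, CHIP_PWR_REG, CHIP_PWR_AMP,
				    CHIP_PWR_AMP);
		snd_soc_update_bits(codec, CHIP_OUT_REG, CHIP_OUT_MUTE, 0);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		/* Mute first, then drop the amp power. */
		snd_soc_update_bits(codec, CHIP_OUT_REG, CHIP_OUT_MUTE,
				    CHIP_OUT_MUTE);
		snd_soc_update_bits(codec, CHIP_PWR_REG, CHIP_PWR_AMP, 0);
		break;
	default:
		break;
	}

	return 0;
}
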
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 23a7e8d61429..1e70736cc970 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -21,6 +21,7 @@
21#include <linux/gpio/consumer.h> 21#include <linux/gpio/consumer.h>
22#include <linux/acpi.h> 22#include <linux/acpi.h>
23#include <linux/dmi.h> 23#include <linux/dmi.h>
24#include <linux/regulator/consumer.h>
24#include <sound/core.h> 25#include <sound/core.h>
25#include <sound/pcm.h> 26#include <sound/pcm.h>
26#include <sound/pcm_params.h> 27#include <sound/pcm_params.h>
@@ -54,7 +55,7 @@ static const struct regmap_range_cfg rt5645_ranges[] = {
54 }, 55 },
55}; 56};
56 57
57static const struct reg_default init_list[] = { 58static const struct reg_sequence init_list[] = {
58 {RT5645_PR_BASE + 0x3d, 0x3600}, 59 {RT5645_PR_BASE + 0x3d, 0x3600},
59 {RT5645_PR_BASE + 0x1c, 0xfd20}, 60 {RT5645_PR_BASE + 0x1c, 0xfd20},
60 {RT5645_PR_BASE + 0x20, 0x611f}, 61 {RT5645_PR_BASE + 0x20, 0x611f},
@@ -63,7 +64,7 @@ static const struct reg_default init_list[] = {
63}; 64};
64#define RT5645_INIT_REG_LEN ARRAY_SIZE(init_list) 65#define RT5645_INIT_REG_LEN ARRAY_SIZE(init_list)
65 66
66static const struct reg_default rt5650_init_list[] = { 67static const struct reg_sequence rt5650_init_list[] = {
67 {0xf6, 0x0100}, 68 {0xf6, 0x0100},
68}; 69};
69 70
@@ -223,6 +224,39 @@ static const struct reg_default rt5645_reg[] = {
223 { 0xff, 0x6308 }, 224 { 0xff, 0x6308 },
224}; 225};
225 226
227static const char *const rt5645_supply_names[] = {
228 "avdd",
229 "cpvdd",
230};
231
232struct rt5645_priv {
233 struct snd_soc_codec *codec;
234 struct rt5645_platform_data pdata;
235 struct regmap *regmap;
236 struct i2c_client *i2c;
237 struct gpio_desc *gpiod_hp_det;
238 struct snd_soc_jack *hp_jack;
239 struct snd_soc_jack *mic_jack;
240 struct snd_soc_jack *btn_jack;
241 struct delayed_work jack_detect_work;
242 struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
243
244 int codec_type;
245 int sysclk;
246 int sysclk_src;
247 int lrck[RT5645_AIFS];
248 int bclk[RT5645_AIFS];
249 int master[RT5645_AIFS];
250
251 int pll_src;
252 int pll_in;
253 int pll_out;
254
255 int jack_type;
256 bool en_button_func;
257 bool hp_on;
258};
259
226static int rt5645_reset(struct snd_soc_codec *codec) 260static int rt5645_reset(struct snd_soc_codec *codec)
227{ 261{
228 return snd_soc_write(codec, RT5645_RESET, 0); 262 return snd_soc_write(codec, RT5645_RESET, 0);
@@ -360,6 +394,7 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg)
360 case RT5645_DEPOP_M1: 394 case RT5645_DEPOP_M1:
361 case RT5645_DEPOP_M2: 395 case RT5645_DEPOP_M2:
362 case RT5645_DEPOP_M3: 396 case RT5645_DEPOP_M3:
397 case RT5645_CHARGE_PUMP:
363 case RT5645_MICBIAS: 398 case RT5645_MICBIAS:
364 case RT5645_A_JD_CTRL1: 399 case RT5645_A_JD_CTRL1:
365 case RT5645_VAD_CTRL4: 400 case RT5645_VAD_CTRL4:
@@ -510,10 +545,11 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
510{ 545{
511 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); 546 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
512 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec); 547 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
513 int idx = -EINVAL; 548 int idx, rate;
514
515 idx = rl6231_calc_dmic_clk(rt5645->sysclk);
516 549
550 rate = rt5645->sysclk / rl6231_get_pre_div(rt5645->regmap,
551 RT5645_ADDA_CLK1, RT5645_I2S_PD1_SFT);
552 idx = rl6231_calc_dmic_clk(rate);
517 if (idx < 0) 553 if (idx < 0)
518 dev_err(codec->dev, "Failed to set DMIC clock\n"); 554 dev_err(codec->dev, "Failed to set DMIC clock\n");
519 else 555 else
@@ -1331,15 +1367,23 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on)
1331 if (on) { 1367 if (on) {
1332 if (hp_amp_power_count <= 0) { 1368 if (hp_amp_power_count <= 0) {
1333 if (rt5645->codec_type == CODEC_TYPE_RT5650) { 1369 if (rt5645->codec_type == CODEC_TYPE_RT5650) {
1370 snd_soc_write(codec, RT5645_DEPOP_M2, 0x3100);
1334 snd_soc_write(codec, RT5645_CHARGE_PUMP, 1371 snd_soc_write(codec, RT5645_CHARGE_PUMP,
1335 0x0e06); 1372 0x0e06);
1336 snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d); 1373 snd_soc_write(codec, RT5645_DEPOP_M1, 0x000d);
1374 regmap_write(rt5645->regmap, RT5645_PR_BASE +
1375 RT5645_HP_DCC_INT1, 0x9f01);
1376 msleep(20);
1377 snd_soc_update_bits(codec, RT5645_DEPOP_M1,
1378 RT5645_HP_CO_MASK, RT5645_HP_CO_EN);
1337 regmap_write(rt5645->regmap, RT5645_PR_BASE + 1379 regmap_write(rt5645->regmap, RT5645_PR_BASE +
1338 0x3e, 0x7400); 1380 0x3e, 0x7400);
1339 snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737); 1381 snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737);
1340 regmap_write(rt5645->regmap, RT5645_PR_BASE + 1382 regmap_write(rt5645->regmap, RT5645_PR_BASE +
1341 RT5645_MAMP_INT_REG2, 0xfc00); 1383 RT5645_MAMP_INT_REG2, 0xfc00);
1342 snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); 1384 snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
1385 mdelay(5);
1386 rt5645->hp_on = true;
1343 } else { 1387 } else {
1344 /* depop parameters */ 1388 /* depop parameters */
1345 snd_soc_update_bits(codec, RT5645_DEPOP_M2, 1389 snd_soc_update_bits(codec, RT5645_DEPOP_M2,
@@ -1553,6 +1597,27 @@ static int rt5645_bst2_event(struct snd_soc_dapm_widget *w,
1553 return 0; 1597 return 0;
1554} 1598}
1555 1599
1600static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
1601 struct snd_kcontrol *k, int event)
1602{
1603 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
1604 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
1605
1606 switch (event) {
1607 case SND_SOC_DAPM_POST_PMU:
1608 if (rt5645->hp_on) {
1609 msleep(100);
1610 rt5645->hp_on = false;
1611 }
1612 break;
1613
1614 default:
1615 return 0;
1616 }
1617
1618 return 0;
1619}
1620
1556static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = { 1621static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
1557 SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER, 1622 SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
1558 RT5645_PWR_LDO2_BIT, 0, NULL, 0), 1623 RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1697,15 +1762,6 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
1697 SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0), 1762 SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
1698 1763
1699 /* IF1 2 Mux */ 1764 /* IF1 2 Mux */
1700 SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
1701 0, 0, &rt5645_if1_adc1_in_mux),
1702 SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
1703 0, 0, &rt5645_if1_adc2_in_mux),
1704 SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
1705 0, 0, &rt5645_if1_adc3_in_mux),
1706 SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
1707 0, 0, &rt5645_if1_adc_in_mux),
1708
1709 SND_SOC_DAPM_MUX("IF2 ADC Mux", SND_SOC_NOPM, 1765 SND_SOC_DAPM_MUX("IF2 ADC Mux", SND_SOC_NOPM,
1710 0, 0, &rt5645_if2_adc_in_mux), 1766 0, 0, &rt5645_if2_adc_in_mux),
1711 1767
@@ -1716,14 +1772,6 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
1716 SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0), 1772 SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
1717 SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0), 1773 SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
1718 SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0), 1774 SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
1719 SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
1720 &rt5645_if1_dac0_tdm_sel_mux),
1721 SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
1722 &rt5645_if1_dac1_tdm_sel_mux),
1723 SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
1724 &rt5645_if1_dac2_tdm_sel_mux),
1725 SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
1726 &rt5645_if1_dac3_tdm_sel_mux),
1727 SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0), 1775 SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
1728 SND_SOC_DAPM_PGA("IF1 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0), 1776 SND_SOC_DAPM_PGA("IF1 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
1729 SND_SOC_DAPM_PGA("IF1 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0), 1777 SND_SOC_DAPM_PGA("IF1 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -1854,6 +1902,26 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
1854 SND_SOC_DAPM_OUTPUT("PDM1R"), 1902 SND_SOC_DAPM_OUTPUT("PDM1R"),
1855 SND_SOC_DAPM_OUTPUT("SPOL"), 1903 SND_SOC_DAPM_OUTPUT("SPOL"),
1856 SND_SOC_DAPM_OUTPUT("SPOR"), 1904 SND_SOC_DAPM_OUTPUT("SPOR"),
1905 SND_SOC_DAPM_POST("DAPM_POST", rt5650_hp_event),
1906};
1907
1908static const struct snd_soc_dapm_widget rt5645_specific_dapm_widgets[] = {
1909 SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
1910 &rt5645_if1_dac0_tdm_sel_mux),
1911 SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
1912 &rt5645_if1_dac1_tdm_sel_mux),
1913 SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
1914 &rt5645_if1_dac2_tdm_sel_mux),
1915 SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
1916 &rt5645_if1_dac3_tdm_sel_mux),
1917 SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
1918 0, 0, &rt5645_if1_adc_in_mux),
1919 SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
1920 0, 0, &rt5645_if1_adc1_in_mux),
1921 SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
1922 0, 0, &rt5645_if1_adc2_in_mux),
1923 SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
1924 0, 0, &rt5645_if1_adc3_in_mux),
1857}; 1925};
1858 1926
1859static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = { 1927static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = {
@@ -2642,7 +2710,7 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
2642 2710
2643 switch (level) { 2711 switch (level) {
2644 case SND_SOC_BIAS_PREPARE: 2712 case SND_SOC_BIAS_PREPARE:
2645 if (SND_SOC_BIAS_STANDBY == codec->dapm.bias_level) { 2713 if (SND_SOC_BIAS_STANDBY == snd_soc_codec_get_bias_level(codec)) {
2646 snd_soc_update_bits(codec, RT5645_PWR_ANLG1, 2714 snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
2647 RT5645_PWR_VREF1 | RT5645_PWR_MB | 2715 RT5645_PWR_VREF1 | RT5645_PWR_MB |
2648 RT5645_PWR_BG | RT5645_PWR_VREF2, 2716 RT5645_PWR_BG | RT5645_PWR_VREF2,
@@ -2686,94 +2754,15 @@ static int rt5645_set_bias_level(struct snd_soc_codec *codec,
2686 return 0; 2754 return 0;
2687} 2755}
2688 2756
2689static int rt5650_calibration(struct rt5645_priv *rt5645)
2690{
2691 int val, i;
2692 int ret = -1;
2693
2694 regcache_cache_bypass(rt5645->regmap, true);
2695 regmap_write(rt5645->regmap, RT5645_RESET, 0);
2696 regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0800);
2697 regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_CHOP_DAC_ADC,
2698 0x3600);
2699 regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x25, 0x7000);
2700 regmap_write(rt5645->regmap, RT5645_I2S1_SDP, 0x8008);
2701 /* headset type */
2702 regmap_write(rt5645->regmap, RT5645_GEN_CTRL1, 0x2061);
2703 regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
2704 regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0x2012);
2705 regmap_write(rt5645->regmap, RT5645_PWR_MIXER, 0x0002);
2706 regmap_write(rt5645->regmap, RT5645_PWR_VOL, 0x0020);
2707 regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
2708 regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
2709 regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x1827);
2710 regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x0827);
2711 msleep(400);
2712 /* Inline command */
2713 regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0001);
2714 regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
2715 regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
2716 /* Calbration */
2717 regmap_write(rt5645->regmap, RT5645_GLB_CLK, 0x8000);
2718 regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
2719 regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
2720 regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
2721 regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x8800);
2722 regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0xe8fa);
2723 regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x8c04);
2724 regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x3100);
2725 regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0e06);
2726 regmap_write(rt5645->regmap, RT5645_BASS_BACK, 0x8a13);
2727 regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0820);
2728 regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x000d);
2729 /* Power on and Calbration */
2730 regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_HP_DCC_INT1,
2731 0x9f01);
2732 msleep(200);
2733 for (i = 0; i < 5; i++) {
2734 regmap_read(rt5645->regmap, RT5645_PR_BASE + 0x7a, &val);
2735 if (val != 0 && val != 0x3f3f) {
2736 ret = 0;
2737 break;
2738 }
2739 msleep(50);
2740 }
2741 pr_debug("%s: PR-7A = 0x%x\n", __func__, val);
2742
2743 /* mute */
2744 regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x3e, 0x7400);
2745 regmap_write(rt5645->regmap, RT5645_DEPOP_M3, 0x0737);
2746 regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_MAMP_INT_REG2,
2747 0xfc00);
2748 regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x1140);
2749 regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
2750 regmap_write(rt5645->regmap, RT5645_GEN_CTRL2, 0x4020);
2751 regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x0006);
2752 regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x0000);
2753 msleep(350);
2754
2755 regcache_cache_bypass(rt5645->regmap, false);
2756
2757 return ret;
2758}
2759
2760static void rt5645_enable_push_button_irq(struct snd_soc_codec *codec, 2757static void rt5645_enable_push_button_irq(struct snd_soc_codec *codec,
2761 bool enable) 2758 bool enable)
2762{ 2759{
2763 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec); 2760 struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
2764 2761
2765 if (enable) { 2762 if (enable) {
2766 snd_soc_dapm_mutex_lock(&codec->dapm); 2763 snd_soc_dapm_force_enable_pin(dapm, "ADC L power");
2767 snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm, 2764 snd_soc_dapm_force_enable_pin(dapm, "ADC R power");
2768 "ADC L power"); 2765 snd_soc_dapm_sync(dapm);
2769 snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
2770 "ADC R power");
2771 snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
2772 "LDO2");
2773 snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
2774 "Mic Det Power");
2775 snd_soc_dapm_sync_unlocked(&codec->dapm);
2776 snd_soc_dapm_mutex_unlock(&codec->dapm);
2777 2766
2778 snd_soc_update_bits(codec, 2767 snd_soc_update_bits(codec,
2779 RT5645_INT_IRQ_ST, 0x8, 0x8); 2768 RT5645_INT_IRQ_ST, 0x8, 0x8);
@@ -2786,36 +2775,26 @@ static void rt5645_enable_push_button_irq(struct snd_soc_codec *codec,
2786 snd_soc_update_bits(codec, RT5650_4BTN_IL_CMD2, 0x8000, 0x0); 2775 snd_soc_update_bits(codec, RT5650_4BTN_IL_CMD2, 0x8000, 0x0);
2787 snd_soc_update_bits(codec, RT5645_INT_IRQ_ST, 0x8, 0x0); 2776 snd_soc_update_bits(codec, RT5645_INT_IRQ_ST, 0x8, 0x0);
2788 2777
2789 snd_soc_dapm_mutex_lock(&codec->dapm); 2778 snd_soc_dapm_disable_pin(dapm, "ADC L power");
2790 snd_soc_dapm_disable_pin_unlocked(&codec->dapm, 2779 snd_soc_dapm_disable_pin(dapm, "ADC R power");
2791 "ADC L power"); 2780 snd_soc_dapm_sync(dapm);
2792 snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
2793 "ADC R power");
2794 if (rt5645->pdata.jd_mode == 0)
2795 snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
2796 "LDO2");
2797 snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
2798 "Mic Det Power");
2799 snd_soc_dapm_sync_unlocked(&codec->dapm);
2800 snd_soc_dapm_mutex_unlock(&codec->dapm);
2801 } 2781 }
2802} 2782}
2803 2783
2804static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert) 2784static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
2805{ 2785{
2786 struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
2806 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec); 2787 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
2807 unsigned int val; 2788 unsigned int val;
2808 2789
2809 if (jack_insert) { 2790 if (jack_insert) {
2810 regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006); 2791 regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
2811 2792
2812 if (codec->component.card->instantiated) { 2793 /* for jack type detect */
2813 /* for jack type detect */ 2794 snd_soc_dapm_force_enable_pin(dapm, "LDO2");
2814 snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2"); 2795 snd_soc_dapm_force_enable_pin(dapm, "Mic Det Power");
2815 snd_soc_dapm_force_enable_pin(&codec->dapm, 2796 snd_soc_dapm_sync(dapm);
2816 "Mic Det Power"); 2797 if (!dapm->card->instantiated) {
2817 snd_soc_dapm_sync(&codec->dapm);
2818 } else {
2819 /* Power up necessary bits for JD if dapm is 2798 /* Power up necessary bits for JD if dapm is
2820 not ready yet */ 2799 not ready yet */
2821 regmap_update_bits(rt5645->regmap, RT5645_PWR_ANLG1, 2800 regmap_update_bits(rt5645->regmap, RT5645_PWR_ANLG1,
@@ -2828,14 +2807,15 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
2828 } 2807 }
2829 2808
2830 regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0); 2809 regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
2831 regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006); 2810 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
2832 regmap_update_bits(rt5645->regmap, 2811 RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
2833 RT5645_IN1_CTRL2, 0x1000, 0x1000); 2812 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
2813 RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
2834 msleep(100); 2814 msleep(100);
2835 regmap_update_bits(rt5645->regmap, 2815 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
2836 RT5645_IN1_CTRL2, 0x1000, 0x0000); 2816 RT5645_CBJ_MN_JD, 0);
2837 2817
2838 msleep(450); 2818 msleep(600);
2839 regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val); 2819 regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
2840 val &= 0x7; 2820 val &= 0x7;
2841 dev_dbg(codec->dev, "val = %d\n", val); 2821 dev_dbg(codec->dev, "val = %d\n", val);
@@ -2846,43 +2826,46 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
2846 rt5645_enable_push_button_irq(codec, true); 2826 rt5645_enable_push_button_irq(codec, true);
2847 } 2827 }
2848 } else { 2828 } else {
2849 if (codec->component.card->instantiated) { 2829 snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
2850 snd_soc_dapm_disable_pin(&codec->dapm, 2830 snd_soc_dapm_sync(dapm);
2851 "Mic Det Power");
2852 snd_soc_dapm_sync(&codec->dapm);
2853 } else
2854 regmap_update_bits(rt5645->regmap,
2855 RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
2856 rt5645->jack_type = SND_JACK_HEADPHONE; 2831 rt5645->jack_type = SND_JACK_HEADPHONE;
2857 } 2832 }
2858 2833
2834 snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
2835 snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
2836 snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
2859 } else { /* jack out */ 2837 } else { /* jack out */
2860 rt5645->jack_type = 0; 2838 rt5645->jack_type = 0;
2839
2840 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
2841 RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
2842 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
2843 RT5645_CBJ_BST1_EN, 0);
2844
2861 if (rt5645->en_button_func) 2845 if (rt5645->en_button_func)
2862 rt5645_enable_push_button_irq(codec, false); 2846 rt5645_enable_push_button_irq(codec, false);
2863 else { 2847
2864 if (codec->component.card->instantiated) { 2848 if (rt5645->pdata.jd_mode == 0)
2865 if (rt5645->pdata.jd_mode == 0) 2849 snd_soc_dapm_disable_pin(dapm, "LDO2");
2866 snd_soc_dapm_disable_pin(&codec->dapm, 2850 snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
2867 "LDO2"); 2851 snd_soc_dapm_sync(dapm);
2868 snd_soc_dapm_disable_pin(&codec->dapm,
2869 "Mic Det Power");
2870 snd_soc_dapm_sync(&codec->dapm);
2871 } else {
2872 if (rt5645->pdata.jd_mode == 0)
2873 regmap_update_bits(rt5645->regmap,
2874 RT5645_PWR_MIXER,
2875 RT5645_PWR_LDO2, 0);
2876 regmap_update_bits(rt5645->regmap,
2877 RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
2878 }
2879 }
2880 } 2852 }
2881 2853
2882 return rt5645->jack_type; 2854 return rt5645->jack_type;
2883} 2855}
2884 2856
2885static int rt5645_irq_detection(struct rt5645_priv *rt5645); 2857static int rt5645_button_detect(struct snd_soc_codec *codec)
2858{
2859 int btn_type, val;
2860
2861 val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
2862 pr_debug("val=0x%x\n", val);
2863 btn_type = val & 0xfff0;
2864 snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
2865
2866 return btn_type;
2867}
2868
2886static irqreturn_t rt5645_irq(int irq, void *data); 2869static irqreturn_t rt5645_irq(int irq, void *data);
2887 2870
2888int rt5645_set_jack_detect(struct snd_soc_codec *codec, 2871int rt5645_set_jack_detect(struct snd_soc_codec *codec,
@@ -2913,36 +2896,11 @@ static void rt5645_jack_detect_work(struct work_struct *work)
2913{ 2896{
2914 struct rt5645_priv *rt5645 = 2897 struct rt5645_priv *rt5645 =
2915 container_of(work, struct rt5645_priv, jack_detect_work.work); 2898 container_of(work, struct rt5645_priv, jack_detect_work.work);
2916
2917 rt5645_irq_detection(rt5645);
2918}
2919
2920static irqreturn_t rt5645_irq(int irq, void *data)
2921{
2922 struct rt5645_priv *rt5645 = data;
2923
2924 queue_delayed_work(system_power_efficient_wq,
2925 &rt5645->jack_detect_work, msecs_to_jiffies(250));
2926
2927 return IRQ_HANDLED;
2928}
2929
2930static int rt5645_button_detect(struct snd_soc_codec *codec)
2931{
2932 int btn_type, val;
2933
2934 val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
2935 pr_debug("val=0x%x\n", val);
2936 btn_type = val & 0xfff0;
2937 snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
2938
2939 return btn_type;
2940}
2941
2942static int rt5645_irq_detection(struct rt5645_priv *rt5645)
2943{
2944 int val, btn_type, gpio_state = 0, report = 0; 2899 int val, btn_type, gpio_state = 0, report = 0;
2945 2900
2901 if (!rt5645->codec)
2902 return;
2903
2946 switch (rt5645->pdata.jd_mode) { 2904 switch (rt5645->pdata.jd_mode) {
2947 case 0: /* Not using rt5645 JD */ 2905 case 0: /* Not using rt5645 JD */
2948 if (rt5645->gpiod_hp_det) { 2906 if (rt5645->gpiod_hp_det) {
@@ -2955,7 +2913,7 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
2955 report, SND_JACK_HEADPHONE); 2913 report, SND_JACK_HEADPHONE);
2956 snd_soc_jack_report(rt5645->mic_jack, 2914 snd_soc_jack_report(rt5645->mic_jack,
2957 report, SND_JACK_MICROPHONE); 2915 report, SND_JACK_MICROPHONE);
2958 return report; 2916 return;
2959 case 1: /* 2 port */ 2917 case 1: /* 2 port */
2960 val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0070; 2918 val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0070;
2961 break; 2919 break;
@@ -3037,27 +2995,39 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
3037 snd_soc_jack_report(rt5645->btn_jack, 2995 snd_soc_jack_report(rt5645->btn_jack,
3038 report, SND_JACK_BTN_0 | SND_JACK_BTN_1 | 2996 report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
3039 SND_JACK_BTN_2 | SND_JACK_BTN_3); 2997 SND_JACK_BTN_2 | SND_JACK_BTN_3);
2998}
2999
3000static irqreturn_t rt5645_irq(int irq, void *data)
3001{
3002 struct rt5645_priv *rt5645 = data;
3003
3004 queue_delayed_work(system_power_efficient_wq,
3005 &rt5645->jack_detect_work, msecs_to_jiffies(250));
3040 3006
3041 return report; 3007 return IRQ_HANDLED;
3042} 3008}
3043 3009
3044static int rt5645_probe(struct snd_soc_codec *codec) 3010static int rt5645_probe(struct snd_soc_codec *codec)
3045{ 3011{
3012 struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
3046 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec); 3013 struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
3047 3014
3048 rt5645->codec = codec; 3015 rt5645->codec = codec;
3049 3016
3050 switch (rt5645->codec_type) { 3017 switch (rt5645->codec_type) {
3051 case CODEC_TYPE_RT5645: 3018 case CODEC_TYPE_RT5645:
3052 snd_soc_dapm_add_routes(&codec->dapm, 3019 snd_soc_dapm_new_controls(dapm,
3020 rt5645_specific_dapm_widgets,
3021 ARRAY_SIZE(rt5645_specific_dapm_widgets));
3022 snd_soc_dapm_add_routes(dapm,
3053 rt5645_specific_dapm_routes, 3023 rt5645_specific_dapm_routes,
3054 ARRAY_SIZE(rt5645_specific_dapm_routes)); 3024 ARRAY_SIZE(rt5645_specific_dapm_routes));
3055 break; 3025 break;
3056 case CODEC_TYPE_RT5650: 3026 case CODEC_TYPE_RT5650:
3057 snd_soc_dapm_new_controls(&codec->dapm, 3027 snd_soc_dapm_new_controls(dapm,
3058 rt5650_specific_dapm_widgets, 3028 rt5650_specific_dapm_widgets,
3059 ARRAY_SIZE(rt5650_specific_dapm_widgets)); 3029 ARRAY_SIZE(rt5650_specific_dapm_widgets));
3060 snd_soc_dapm_add_routes(&codec->dapm, 3030 snd_soc_dapm_add_routes(dapm,
3061 rt5650_specific_dapm_routes, 3031 rt5650_specific_dapm_routes,
3062 ARRAY_SIZE(rt5650_specific_dapm_routes)); 3032 ARRAY_SIZE(rt5650_specific_dapm_routes));
3063 break; 3033 break;
@@ -3067,9 +3037,9 @@ static int rt5645_probe(struct snd_soc_codec *codec)
3067 3037
3068 /* for JD function */ 3038 /* for JD function */
3069 if (rt5645->pdata.jd_mode) { 3039 if (rt5645->pdata.jd_mode) {
3070 snd_soc_dapm_force_enable_pin(&codec->dapm, "JD Power"); 3040 snd_soc_dapm_force_enable_pin(dapm, "JD Power");
3071 snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2"); 3041 snd_soc_dapm_force_enable_pin(dapm, "LDO2");
3072 snd_soc_dapm_sync(&codec->dapm); 3042 snd_soc_dapm_sync(dapm);
3073 } 3043 }
3074 3044
3075 return 0; 3045 return 0;
@@ -3110,7 +3080,7 @@ static int rt5645_resume(struct snd_soc_codec *codec)
3110#define RT5645_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ 3080#define RT5645_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
3111 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8) 3081 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
3112 3082
3113static struct snd_soc_dai_ops rt5645_aif_dai_ops = { 3083static const struct snd_soc_dai_ops rt5645_aif_dai_ops = {
3114 .hw_params = rt5645_hw_params, 3084 .hw_params = rt5645_hw_params,
3115 .set_fmt = rt5645_set_dai_fmt, 3085 .set_fmt = rt5645_set_dai_fmt,
3116 .set_sysclk = rt5645_set_dai_sysclk, 3086 .set_sysclk = rt5645_set_dai_sysclk,
@@ -3221,7 +3191,7 @@ static int strago_quirk_cb(const struct dmi_system_id *id)
3221 return 1; 3191 return 1;
3222} 3192}
3223 3193
3224static struct dmi_system_id dmi_platform_intel_braswell[] = { 3194static const struct dmi_system_id dmi_platform_intel_braswell[] = {
3225 { 3195 {
3226 .ident = "Intel Strago", 3196 .ident = "Intel Strago",
3227 .callback = strago_quirk_cb, 3197 .callback = strago_quirk_cb,
@@ -3229,6 +3199,13 @@ static struct dmi_system_id dmi_platform_intel_braswell[] = {
3229 DMI_MATCH(DMI_PRODUCT_NAME, "Strago"), 3199 DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
3230 }, 3200 },
3231 }, 3201 },
3202 {
3203 .ident = "Google Celes",
3204 .callback = strago_quirk_cb,
3205 .matches = {
3206 DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
3207 },
3208 },
3232 { } 3209 { }
3233}; 3210};
3234 3211
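The DMI table above gains a second board ("Google Celes") that shares the same quirk callback. For context, such a table is normally consulted once at probe time with dmi_check_system(), which invokes .callback for every entry whose DMI_MATCH strings all match the firmware-reported DMI data; the sketch below is illustrative only and uses placeholder names:

    /* Illustrative sketch, not part of the patch. */
    static int apply_board_quirk(const struct dmi_system_id *id)
    {
            /* adjust platform data for the matched board here */
            return 1;                       /* non-zero stops the table walk */
    }

    static const struct dmi_system_id my_quirk_table[] = {
            {
                    .ident = "Example board",
                    .callback = apply_board_quirk,
                    .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "Example") },
            },
            { }                             /* terminator */
    };

    /* in probe(): */
    dmi_check_system(my_quirk_table);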
@@ -3251,7 +3228,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3251{ 3228{
3252 struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev); 3229 struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
3253 struct rt5645_priv *rt5645; 3230 struct rt5645_priv *rt5645;
3254 int ret; 3231 int ret, i;
3255 unsigned int val; 3232 unsigned int val;
3256 3233
3257 rt5645 = devm_kzalloc(&i2c->dev, sizeof(struct rt5645_priv), 3234 rt5645 = devm_kzalloc(&i2c->dev, sizeof(struct rt5645_priv),
@@ -3285,6 +3262,24 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3285 return ret; 3262 return ret;
3286 } 3263 }
3287 3264
3265 for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
3266 rt5645->supplies[i].supply = rt5645_supply_names[i];
3267
3268 ret = devm_regulator_bulk_get(&i2c->dev,
3269 ARRAY_SIZE(rt5645->supplies),
3270 rt5645->supplies);
3271 if (ret) {
3272 dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
3273 return ret;
3274 }
3275
3276 ret = regulator_bulk_enable(ARRAY_SIZE(rt5645->supplies),
3277 rt5645->supplies);
3278 if (ret) {
3279 dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
3280 return ret;
3281 }
3282
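The new block above follows the usual bulk-regulator sequence: fill in the supply names, request them all with devm_regulator_bulk_get(), enable them before touching the chip, and disable them again on the later error paths and in remove. A condensed sketch of that sequence, with an illustrative supply-name list (the real names and the supplies[] field are declared elsewhere in this patch):

    static const char * const names[] = { "supply-a", "supply-b" }; /* illustrative */
    struct regulator_bulk_data supplies[ARRAY_SIZE(names)];
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(supplies); i++)
            supplies[i].supply = names[i];

    ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
    if (ret)
            return ret;                     /* nothing to undo yet */

    ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
    if (ret)
            return ret;

    /* ...and on any later failure, in remove(), etc.: */
    regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);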
3288 regmap_read(rt5645->regmap, RT5645_VENDOR_ID2, &val); 3283 regmap_read(rt5645->regmap, RT5645_VENDOR_ID2, &val);
3289 3284
3290 switch (val) { 3285 switch (val) {
@@ -3296,16 +3291,10 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3296 break; 3291 break;
3297 default: 3292 default:
3298 dev_err(&i2c->dev, 3293 dev_err(&i2c->dev,
3299 "Device with ID register %x is not rt5645 or rt5650\n", 3294 "Device with ID register %#x is not rt5645 or rt5650\n",
3300 val); 3295 val);
3301 return -ENODEV; 3296 ret = -ENODEV;
3302 } 3297 goto err_enable;
3303
3304 if (rt5645->codec_type == CODEC_TYPE_RT5650) {
3305 ret = rt5650_calibration(rt5645);
3306
3307 if (ret < 0)
3308 pr_err("calibration failed!\n");
3309 } 3298 }
3310 3299
3311 regmap_write(rt5645->regmap, RT5645_RESET, 0); 3300 regmap_write(rt5645->regmap, RT5645_RESET, 0);
@@ -3338,6 +3327,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3338 break; 3327 break;
3339 3328
3340 case RT5645_DMIC_DATA_GPIO5: 3329 case RT5645_DMIC_DATA_GPIO5:
3330 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
3331 RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
3341 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, 3332 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
3342 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5); 3333 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
3343 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, 3334 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
@@ -3393,8 +3384,6 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3393 regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3, 3384 regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
3394 RT5645_IRQ_CLK_GATE_CTRL, 3385 RT5645_IRQ_CLK_GATE_CTRL,
3395 RT5645_IRQ_CLK_GATE_CTRL); 3386 RT5645_IRQ_CLK_GATE_CTRL);
3396 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
3397 RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
3398 regmap_update_bits(rt5645->regmap, RT5645_MICBIAS, 3387 regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
3399 RT5645_IRQ_CLK_INT, RT5645_IRQ_CLK_INT); 3388 RT5645_IRQ_CLK_INT, RT5645_IRQ_CLK_INT);
3400 regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2, 3389 regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
@@ -3434,12 +3423,25 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3434 ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq, 3423 ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
3435 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING 3424 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
3436 | IRQF_ONESHOT, "rt5645", rt5645); 3425 | IRQF_ONESHOT, "rt5645", rt5645);
3437 if (ret) 3426 if (ret) {
3438 		dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret); 3427 		dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);

3428 goto err_enable;
3429 }
3439 } 3430 }
3440 3431
3441 return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645, 3432 ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
3442 rt5645_dai, ARRAY_SIZE(rt5645_dai)); 3433 rt5645_dai, ARRAY_SIZE(rt5645_dai));
3434 if (ret)
3435 goto err_irq;
3436
3437 return 0;
3438
3439err_irq:
3440 if (rt5645->i2c->irq)
3441 free_irq(rt5645->i2c->irq, rt5645);
3442err_enable:
3443 regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
3444 return ret;
3443} 3445}
3444 3446
3445static int rt5645_i2c_remove(struct i2c_client *i2c) 3447static int rt5645_i2c_remove(struct i2c_client *i2c)
@@ -3452,17 +3454,31 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
3452 cancel_delayed_work_sync(&rt5645->jack_detect_work); 3454 cancel_delayed_work_sync(&rt5645->jack_detect_work);
3453 3455
3454 snd_soc_unregister_codec(&i2c->dev); 3456 snd_soc_unregister_codec(&i2c->dev);
3457 regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
3455 3458
3456 return 0; 3459 return 0;
3457} 3460}
3458 3461
3462static void rt5645_i2c_shutdown(struct i2c_client *i2c)
3463{
3464 struct rt5645_priv *rt5645 = i2c_get_clientdata(i2c);
3465
3466 regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
3467 RT5645_RING2_SLEEVE_GND, RT5645_RING2_SLEEVE_GND);
3468 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, RT5645_CBJ_MN_JD,
3469 RT5645_CBJ_MN_JD);
3470 regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, RT5645_CBJ_BST1_EN,
3471 0);
3472}
3473
3459static struct i2c_driver rt5645_i2c_driver = { 3474static struct i2c_driver rt5645_i2c_driver = {
3460 .driver = { 3475 .driver = {
3461 .name = "rt5645", 3476 .name = "rt5645",
3462 .acpi_match_table = ACPI_PTR(rt5645_acpi_match), 3477 .acpi_match_table = ACPI_PTR(rt5645_acpi_match),
3463 }, 3478 },
3464 .probe = rt5645_i2c_probe, 3479 .probe = rt5645_i2c_probe,
3465 .remove = rt5645_i2c_remove, 3480 .remove = rt5645_i2c_remove,
3481 .shutdown = rt5645_i2c_shutdown,
3466 .id_table = rt5645_i2c_id, 3482 .id_table = rt5645_i2c_id,
3467}; 3483};
3468module_i2c_driver(rt5645_i2c_driver); 3484module_i2c_driver(rt5645_i2c_driver);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..0e4cfc6ac649 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
1693#define RT5645_GP6_PIN_SFT 6 1693#define RT5645_GP6_PIN_SFT 6
1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6) 1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6)
1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6) 1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6)
1696#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4)
1697#define RT5645_I2S2_DAC_PIN_SFT 4
1698#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4)
1699#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4)
1696#define RT5645_GP8_PIN_MASK (0x1 << 3) 1700#define RT5645_GP8_PIN_MASK (0x1 << 3)
1697#define RT5645_GP8_PIN_SFT 3 1701#define RT5645_GP8_PIN_SFT 3
1698#define RT5645_GP8_PIN_GPIO8 (0x0 << 3) 1702#define RT5645_GP8_PIN_GPIO8 (0x0 << 3)
@@ -2111,6 +2115,7 @@ enum {
2111#define RT5645_JD_PSV_MODE (0x1 << 12) 2115#define RT5645_JD_PSV_MODE (0x1 << 12)
2112#define RT5645_IRQ_CLK_GATE_CTRL (0x1 << 11) 2116#define RT5645_IRQ_CLK_GATE_CTRL (0x1 << 11)
2113#define RT5645_MICINDET_MANU (0x1 << 7) 2117#define RT5645_MICINDET_MANU (0x1 << 7)
2118#define RT5645_RING2_SLEEVE_GND (0x1 << 5)
2114 2119
2115/* Vendor ID (0xfd) */ 2120/* Vendor ID (0xfd) */
2116#define RT5645_VER_C 0x2 2121#define RT5645_VER_C 0x2
@@ -2177,32 +2182,6 @@ enum {
2177int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec, 2182int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec,
2178 unsigned int filter_mask, unsigned int clk_src); 2183 unsigned int filter_mask, unsigned int clk_src);
2179 2184
2180struct rt5645_priv {
2181 struct snd_soc_codec *codec;
2182 struct rt5645_platform_data pdata;
2183 struct regmap *regmap;
2184 struct i2c_client *i2c;
2185 struct gpio_desc *gpiod_hp_det;
2186 struct snd_soc_jack *hp_jack;
2187 struct snd_soc_jack *mic_jack;
2188 struct snd_soc_jack *btn_jack;
2189 struct delayed_work jack_detect_work;
2190
2191 int codec_type;
2192 int sysclk;
2193 int sysclk_src;
2194 int lrck[RT5645_AIFS];
2195 int bclk[RT5645_AIFS];
2196 int master[RT5645_AIFS];
2197
2198 int pll_src;
2199 int pll_in;
2200 int pll_out;
2201
2202 int jack_type;
2203 bool en_button_func;
2204};
2205
2206int rt5645_set_jack_detect(struct snd_soc_codec *codec, 2185int rt5645_set_jack_detect(struct snd_soc_codec *codec,
2207 struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack, 2186 struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
2208 struct snd_soc_jack *btn_jack); 2187 struct snd_soc_jack *btn_jack);
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 872121015dfc..8f7159ba6ca2 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -46,7 +46,7 @@ static const struct regmap_range_cfg rt5651_ranges[] = {
46 .window_len = 0x1, }, 46 .window_len = 0x1, },
47}; 47};
48 48
49static struct reg_default init_list[] = { 49static const struct reg_sequence init_list[] = {
50 {RT5651_PR_BASE + 0x3d, 0x3e00}, 50 {RT5651_PR_BASE + 0x3d, 0x3e00},
51}; 51};
52 52
@@ -378,10 +378,11 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
378{ 378{
379 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); 379 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
380 struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec); 380 struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
381 int idx = -EINVAL; 381 int idx, rate;
382
383 idx = rl6231_calc_dmic_clk(rt5651->sysclk);
384 382
383 rate = rt5651->sysclk / rl6231_get_pre_div(rt5651->regmap,
384 RT5651_ADDA_CLK1, RT5651_I2S_PD1_SFT);
385 idx = rl6231_calc_dmic_clk(rate);
385 if (idx < 0) 386 if (idx < 0)
386 dev_err(codec->dev, "Failed to set DMIC clock\n"); 387 dev_err(codec->dev, "Failed to set DMIC clock\n");
387 else 388 else
@@ -1769,7 +1770,7 @@ static int rt5651_i2c_probe(struct i2c_client *i2c,
1769 regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret); 1770 regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret);
1770 if (ret != RT5651_DEVICE_ID_VALUE) { 1771 if (ret != RT5651_DEVICE_ID_VALUE) {
1771 dev_err(&i2c->dev, 1772 dev_err(&i2c->dev,
1772 "Device with ID register %x is not rt5651\n", ret); 1773 "Device with ID register %#x is not rt5651\n", ret);
1773 return -ENODEV; 1774 return -ENODEV;
1774 } 1775 }
1775 1776
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 8f9ab2b493ec..177748af94fc 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -51,7 +51,7 @@ static const struct regmap_range_cfg rt5670_ranges[] = {
51 .window_len = 0x1, }, 51 .window_len = 0x1, },
52}; 52};
53 53
54static const struct reg_default init_list[] = { 54static const struct reg_sequence init_list[] = {
55 { RT5670_PR_BASE + 0x14, 0x9a8a }, 55 { RT5670_PR_BASE + 0x14, 0x9a8a },
56 { RT5670_PR_BASE + 0x38, 0x3ba1 }, 56 { RT5670_PR_BASE + 0x38, 0x3ba1 },
57 { RT5670_PR_BASE + 0x3d, 0x3640 }, 57 { RT5670_PR_BASE + 0x3d, 0x3640 },
@@ -683,10 +683,11 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
683{ 683{
684 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); 684 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
685 struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec); 685 struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
686 int idx = -EINVAL; 686 int idx, rate;
687
688 idx = rl6231_calc_dmic_clk(rt5670->sysclk);
689 687
688 rate = rt5670->sysclk / rl6231_get_pre_div(rt5670->regmap,
689 RT5670_ADDA_CLK1, RT5670_I2S_PD1_SFT);
690 idx = rl6231_calc_dmic_clk(rate);
690 if (idx < 0) 691 if (idx < 0)
691 dev_err(codec->dev, "Failed to set DMIC clock\n"); 692 dev_err(codec->dev, "Failed to set DMIC clock\n");
692 else 693 else
@@ -2720,7 +2721,7 @@ static int rt5670_resume(struct snd_soc_codec *codec)
2720#define RT5670_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ 2721#define RT5670_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
2721 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8) 2722 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
2722 2723
2723static struct snd_soc_dai_ops rt5670_aif_dai_ops = { 2724static const struct snd_soc_dai_ops rt5670_aif_dai_ops = {
2724 .hw_params = rt5670_hw_params, 2725 .hw_params = rt5670_hw_params,
2725 .set_fmt = rt5670_set_dai_fmt, 2726 .set_fmt = rt5670_set_dai_fmt,
2726 .set_sysclk = rt5670_set_dai_sysclk, 2727 .set_sysclk = rt5670_set_dai_sysclk,
@@ -2863,7 +2864,7 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
2863 regmap_read(rt5670->regmap, RT5670_VENDOR_ID2, &val); 2864 regmap_read(rt5670->regmap, RT5670_VENDOR_ID2, &val);
2864 if (val != RT5670_DEVICE_ID) { 2865 if (val != RT5670_DEVICE_ID) {
2865 dev_err(&i2c->dev, 2866 dev_err(&i2c->dev,
2866 "Device with ID register %x is not rt5670/72\n", val); 2867 "Device with ID register %#x is not rt5670/72\n", val);
2867 return -ENODEV; 2868 return -ENODEV;
2868 } 2869 }
2869 2870
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index ef6348cb9157..3505aafbade4 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -31,84 +31,197 @@
31 31
32#include "rt5677-spi.h" 32#include "rt5677-spi.h"
33 33
34#define RT5677_SPI_BURST_LEN 240
35#define RT5677_SPI_HEADER 5
36#define RT5677_SPI_FREQ 6000000
37
38/* The AddressPhase and DataPhase of SPI commands are MSB first on the wire.
39 * DataPhase word size of 16-bit commands is 2 bytes.
40 * DataPhase word size of 32-bit commands is 4 bytes.
41 * DataPhase word size of burst commands is 8 bytes.
42 * The DSP CPU is little-endian.
43 */
44#define RT5677_SPI_WRITE_BURST 0x5
45#define RT5677_SPI_READ_BURST 0x4
46#define RT5677_SPI_WRITE_32 0x3
47#define RT5677_SPI_READ_32 0x2
48#define RT5677_SPI_WRITE_16 0x1
49#define RT5677_SPI_READ_16 0x0
50
34static struct spi_device *g_spi; 51static struct spi_device *g_spi;
52static DEFINE_MUTEX(spi_mutex);
35 53
36/** 54/* Select a suitable transfer command for the next transfer to ensure
37 * rt5677_spi_write - Write data to SPI. 55 * the transfer address is always naturally aligned while minimizing
38 * @txbuf: Data Buffer for writing. 56 * the total number of transfers required.
39 * @len: Data length. 57 *
58 * 3 transfer commands are available:
59 * RT5677_SPI_READ/WRITE_16: Transfer 2 bytes
60 * RT5677_SPI_READ/WRITE_32: Transfer 4 bytes
61 * RT5677_SPI_READ/WRITE_BURST: Transfer any multiples of 8 bytes
62 *
63 * For example, reading 260 bytes at 0x60030002 uses the following commands:
64 * 0x60030002 RT5677_SPI_READ_16 2 bytes
65 * 0x60030004 RT5677_SPI_READ_32 4 bytes
66 * 0x60030008 RT5677_SPI_READ_BURST 240 bytes
67 * 0x600300F8 RT5677_SPI_READ_BURST 8 bytes
68 * 0x60030100 RT5677_SPI_READ_32 4 bytes
69 * 0x60030104 RT5677_SPI_READ_16 2 bytes
40 * 70 *
71 * Input:
72 * @read: true for read commands; false for write commands
73 * @align: alignment of the next transfer address
74 * @remain: number of bytes remaining to transfer
41 * 75 *
42 * Returns true for success. 76 * Output:
77 * @len: number of bytes to transfer with the selected command
78 * Returns the selected command
43 */ 79 */
44int rt5677_spi_write(u8 *txbuf, size_t len) 80static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
45{ 81{
46 int status; 82 u8 cmd;
47 83
48 status = spi_write(g_spi, txbuf, len); 84 if (align == 2 || align == 6 || remain == 2) {
49 85 cmd = RT5677_SPI_READ_16;
50 if (status) 86 *len = 2;
51 dev_err(&g_spi->dev, "rt5677_spi_write error %d\n", status); 87 } else if (align == 4 || remain <= 6) {
52 88 cmd = RT5677_SPI_READ_32;
53 return status; 89 *len = 4;
90 } else {
91 cmd = RT5677_SPI_READ_BURST;
92 *len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
93 }
94 return read ? cmd : cmd + 1;
54} 95}
55EXPORT_SYMBOL_GPL(rt5677_spi_write);
56 96
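The selection rule above can be checked in isolation. The following stand-alone userspace restatement (not driver code) reproduces the 260-byte decomposition from the comment:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace restatement of rt5677_spi_select_cmd()'s length choice. */
    static uint32_t pick_len(uint32_t align, uint32_t remain)
    {
            uint32_t burst;

            if (align == 2 || align == 6 || remain == 2)
                    return 2;                       /* _16 command */
            if (align == 4 || remain <= 6)
                    return 4;                       /* _32 command */
            burst = remain & ~7u;                   /* burst: multiple of 8 */
            return burst > 240 ? 240 : burst;       /* capped at 240 bytes */
    }

    int main(void)
    {
            uint32_t addr = 0x60030002, len = 260, off = 0, n;

            while (off < len) {
                    n = pick_len((addr + off) & 7, len - off);
                    printf("0x%08X  %3u bytes\n", addr + off, n);
                    off += n;
            }
            return 0;       /* prints 2, 4, 240, 8, 4, 2 as in the comment */
    }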
57/** 97/* Copy dstlen bytes from src to dst, while reversing byte order for each word.
58 * rt5677_spi_burst_write - Write data to SPI by rt5677 dsp memory address. 98 * If srclen < dstlen, zeros are padded.
59 * @addr: Start address.
60 * @txbuf: Data Buffer for writng.
61 * @len: Data length, it must be a multiple of 8.
62 *
63 *
64 * Returns true for success.
65 */ 99 */
66int rt5677_spi_burst_write(u32 addr, const struct firmware *fw) 100static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
67{ 101{
68 u8 spi_cmd = RT5677_SPI_CMD_BURST_WRITE; 102 u32 w, i, si;
69 u8 *write_buf; 103 u32 word_size = min_t(u32, dstlen, 8);
70 unsigned int i, end, offset = 0; 104
71 105 for (w = 0; w < dstlen; w += word_size) {
72 write_buf = kmalloc(RT5677_SPI_BUF_LEN + 6, GFP_KERNEL); 106 for (i = 0; i < word_size; i++) {
73 107 si = w + word_size - i - 1;
74 if (write_buf == NULL) 108 dst[w + i] = si < srclen ? src[si] : 0;
75 return -ENOMEM;
76
77 while (offset < fw->size) {
78 if (offset + RT5677_SPI_BUF_LEN <= fw->size)
79 end = RT5677_SPI_BUF_LEN;
80 else
81 end = fw->size % RT5677_SPI_BUF_LEN;
82
83 write_buf[0] = spi_cmd;
84 write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
85 write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
86 write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
87 write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
88
89 for (i = 0; i < end; i += 8) {
90 write_buf[i + 12] = fw->data[offset + i + 0];
91 write_buf[i + 11] = fw->data[offset + i + 1];
92 write_buf[i + 10] = fw->data[offset + i + 2];
93 write_buf[i + 9] = fw->data[offset + i + 3];
94 write_buf[i + 8] = fw->data[offset + i + 4];
95 write_buf[i + 7] = fw->data[offset + i + 5];
96 write_buf[i + 6] = fw->data[offset + i + 6];
97 write_buf[i + 5] = fw->data[offset + i + 7];
98 } 109 }
110 }
111}
99 112
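A worked example of the reordering rt5677_spi_reverse() performs (illustrative byte values only):

    /* With the 8-byte burst word size:
     *   src: 00 01 02 03 04 05 06 07  ->  dst: 07 06 05 04 03 02 01 00
     * and when only 5 source bytes remain (srclen < dstlen), the missing
     * high-order bytes are written as zero:
     *   src: 00 01 02 03 04           ->  dst: 00 00 00 04 03 02 01 00
     */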
100 write_buf[end + 5] = spi_cmd; 113/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
114int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
115{
116 u32 offset;
117 int status = 0;
118 struct spi_transfer t[2];
119 struct spi_message m;
120 /* +4 bytes is for the DummyPhase following the AddressPhase */
121 u8 header[RT5677_SPI_HEADER + 4];
122 u8 body[RT5677_SPI_BURST_LEN];
123 u8 spi_cmd;
124 u8 *cb = rxbuf;
125
126 if (!g_spi)
127 return -ENODEV;
128
129 if ((addr & 1) || (len & 1)) {
130 dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
131 return -EACCES;
132 }
101 133
102 rt5677_spi_write(write_buf, end + 6); 134 memset(t, 0, sizeof(t));
135 t[0].tx_buf = header;
136 t[0].len = sizeof(header);
137 t[0].speed_hz = RT5677_SPI_FREQ;
138 t[1].rx_buf = body;
139 t[1].speed_hz = RT5677_SPI_FREQ;
140 spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
141
142 for (offset = 0; offset < len; offset += t[1].len) {
143 spi_cmd = rt5677_spi_select_cmd(true, (addr + offset) & 7,
144 len - offset, &t[1].len);
145
146 /* Construct SPI message header */
147 header[0] = spi_cmd;
148 header[1] = ((addr + offset) & 0xff000000) >> 24;
149 header[2] = ((addr + offset) & 0x00ff0000) >> 16;
150 header[3] = ((addr + offset) & 0x0000ff00) >> 8;
151 header[4] = ((addr + offset) & 0x000000ff) >> 0;
152
153 mutex_lock(&spi_mutex);
154 status |= spi_sync(g_spi, &m);
155 mutex_unlock(&spi_mutex);
156
157 /* Copy data back to caller buffer */
158 rt5677_spi_reverse(cb + offset, t[1].len, body, t[1].len);
159 }
160 return status;
161}
162EXPORT_SYMBOL_GPL(rt5677_spi_read);
103 163
104 offset += RT5677_SPI_BUF_LEN; 164/* Write DSP address space using SPI. addr has to be 2-byte aligned.
165 * If len is not 2-byte aligned, an extra byte of zero is written at the end
166 * as padding.
167 */
168int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
169{
170 u32 offset, len_with_pad = len;
171 int status = 0;
172 struct spi_transfer t;
173 struct spi_message m;
174 /* +1 byte is for the DummyPhase following the DataPhase */
175 u8 buf[RT5677_SPI_HEADER + RT5677_SPI_BURST_LEN + 1];
176 u8 *body = buf + RT5677_SPI_HEADER;
177 u8 spi_cmd;
178 const u8 *cb = txbuf;
179
180 if (!g_spi)
181 return -ENODEV;
182
183 if (addr & 1) {
184 dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
185 return -EACCES;
105 } 186 }
106 187
107 kfree(write_buf); 188 if (len & 1)
189 len_with_pad = len + 1;
190
191 memset(&t, 0, sizeof(t));
192 t.tx_buf = buf;
193 t.speed_hz = RT5677_SPI_FREQ;
194 spi_message_init_with_transfers(&m, &t, 1);
195
196 for (offset = 0; offset < len_with_pad;) {
197 spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
198 len_with_pad - offset, &t.len);
199
200 /* Construct SPI message header */
201 buf[0] = spi_cmd;
202 buf[1] = ((addr + offset) & 0xff000000) >> 24;
203 buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
204 buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
205 buf[4] = ((addr + offset) & 0x000000ff) >> 0;
206
207 /* Fetch data from caller buffer */
208 rt5677_spi_reverse(body, t.len, cb + offset, len - offset);
209 offset += t.len;
210 t.len += RT5677_SPI_HEADER + 1;
211
212 mutex_lock(&spi_mutex);
213 status |= spi_sync(g_spi, &m);
214 mutex_unlock(&spi_mutex);
215 }
216 return status;
217}
218EXPORT_SYMBOL_GPL(rt5677_spi_write);
108 219
109 return 0; 220int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
221{
222 return rt5677_spi_write(addr, fw->data, fw->size);
110} 223}
111EXPORT_SYMBOL_GPL(rt5677_spi_burst_write); 224EXPORT_SYMBOL_GPL(rt5677_spi_write_firmware);
112 225
113static int rt5677_spi_probe(struct spi_device *spi) 226static int rt5677_spi_probe(struct spi_device *spi)
114{ 227{
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
index ec41b2b3b2ca..662db16cfb6a 100644
--- a/sound/soc/codecs/rt5677-spi.h
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -12,10 +12,8 @@
12#ifndef __RT5677_SPI_H__ 12#ifndef __RT5677_SPI_H__
13#define __RT5677_SPI_H__ 13#define __RT5677_SPI_H__
14 14
15#define RT5677_SPI_BUF_LEN 240 15int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
16#define RT5677_SPI_CMD_BURST_WRITE 0x05 16int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
17 17int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
18int rt5677_spi_write(u8 *txbuf, size_t len);
19int rt5677_spi_burst_write(u32 addr, const struct firmware *fw);
20 18
21#endif /* __RT5677_SPI_H__ */ 19#endif /* __RT5677_SPI_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 03afec78a170..b7de51b09c35 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -15,13 +15,12 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/pm.h> 17#include <linux/pm.h>
18#include <linux/of_gpio.h>
19#include <linux/regmap.h> 18#include <linux/regmap.h>
20#include <linux/i2c.h> 19#include <linux/i2c.h>
21#include <linux/platform_device.h> 20#include <linux/platform_device.h>
22#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
23#include <linux/firmware.h> 22#include <linux/firmware.h>
24#include <linux/gpio.h> 23#include <linux/property.h>
25#include <sound/core.h> 24#include <sound/core.h>
26#include <sound/pcm.h> 25#include <sound/pcm.h>
27#include <sound/pcm_params.h> 26#include <sound/pcm_params.h>
@@ -54,7 +53,7 @@ static const struct regmap_range_cfg rt5677_ranges[] = {
54 }, 53 },
55}; 54};
56 55
57static const struct reg_default init_list[] = { 56static const struct reg_sequence init_list[] = {
58 {RT5677_ASRC_12, 0x0018}, 57 {RT5677_ASRC_12, 0x0018},
59 {RT5677_PR_BASE + 0x3d, 0x364d}, 58 {RT5677_PR_BASE + 0x3d, 0x364d},
60 {RT5677_PR_BASE + 0x17, 0x4fc0}, 59 {RT5677_PR_BASE + 0x17, 0x4fc0},
@@ -746,14 +745,14 @@ static int rt5677_set_dsp_vad(struct snd_soc_codec *codec, bool on)
746 ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1, 745 ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
747 codec->dev); 746 codec->dev);
748 if (ret == 0) { 747 if (ret == 0) {
749 rt5677_spi_burst_write(0x50000000, rt5677->fw1); 748 rt5677_spi_write_firmware(0x50000000, rt5677->fw1);
750 release_firmware(rt5677->fw1); 749 release_firmware(rt5677->fw1);
751 } 750 }
752 751
753 ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2, 752 ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
754 codec->dev); 753 codec->dev);
755 if (ret == 0) { 754 if (ret == 0) {
756 rt5677_spi_burst_write(0x60000000, rt5677->fw2); 755 rt5677_spi_write_firmware(0x60000000, rt5677->fw2);
757 release_firmware(rt5677->fw2); 756 release_firmware(rt5677->fw2);
758 } 757 }
759 758
@@ -917,8 +916,11 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
917{ 916{
918 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); 917 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
919 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 918 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
920 int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8); 919 int idx, rate;
921 920
921 rate = rt5677->sysclk / rl6231_get_pre_div(rt5677->regmap,
922 RT5677_CLK_TREE_CTRL1, RT5677_I2S_PD1_SFT);
923 idx = rl6231_calc_dmic_clk(rate);
922 if (idx < 0) 924 if (idx < 0)
923 dev_err(codec->dev, "Failed to set DMIC clock\n"); 925 dev_err(codec->dev, "Failed to set DMIC clock\n");
924 else 926 else
@@ -4764,10 +4766,8 @@ static int rt5677_remove(struct snd_soc_codec *codec)
4764 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 4766 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
4765 4767
4766 regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec); 4768 regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
4767 if (gpio_is_valid(rt5677->pow_ldo2)) 4769 gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
4768 gpio_set_value_cansleep(rt5677->pow_ldo2, 0); 4770 gpiod_set_value_cansleep(rt5677->reset_pin, 0);
4769 if (gpio_is_valid(rt5677->reset_pin))
4770 gpio_set_value_cansleep(rt5677->reset_pin, 0);
4771 4771
4772 return 0; 4772 return 0;
4773} 4773}
@@ -4781,10 +4781,8 @@ static int rt5677_suspend(struct snd_soc_codec *codec)
4781 regcache_cache_only(rt5677->regmap, true); 4781 regcache_cache_only(rt5677->regmap, true);
4782 regcache_mark_dirty(rt5677->regmap); 4782 regcache_mark_dirty(rt5677->regmap);
4783 4783
4784 if (gpio_is_valid(rt5677->pow_ldo2)) 4784 gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
4785 gpio_set_value_cansleep(rt5677->pow_ldo2, 0); 4785 gpiod_set_value_cansleep(rt5677->reset_pin, 0);
4786 if (gpio_is_valid(rt5677->reset_pin))
4787 gpio_set_value_cansleep(rt5677->reset_pin, 0);
4788 } 4786 }
4789 4787
4790 return 0; 4788 return 0;
@@ -4795,12 +4793,9 @@ static int rt5677_resume(struct snd_soc_codec *codec)
4795 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); 4793 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
4796 4794
4797 if (!rt5677->dsp_vad_en) { 4795 if (!rt5677->dsp_vad_en) {
4798 if (gpio_is_valid(rt5677->pow_ldo2)) 4796 gpiod_set_value_cansleep(rt5677->pow_ldo2, 1);
4799 gpio_set_value_cansleep(rt5677->pow_ldo2, 1); 4797 gpiod_set_value_cansleep(rt5677->reset_pin, 1);
4800 if (gpio_is_valid(rt5677->reset_pin)) 4798 if (rt5677->pow_ldo2 || rt5677->reset_pin)
4801 gpio_set_value_cansleep(rt5677->reset_pin, 1);
4802 if (gpio_is_valid(rt5677->pow_ldo2) ||
4803 gpio_is_valid(rt5677->reset_pin))
4804 msleep(10); 4799 msleep(10);
4805 4800
4806 regcache_cache_only(rt5677->regmap, false); 4801 regcache_cache_only(rt5677->regmap, false);
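The gpio_is_valid() guards can be dropped in these hunks because the descriptors now come from devm_gpiod_get_optional(), which returns NULL when the property is absent, and gpiod consumer calls such as gpiod_set_value_cansleep() treat a NULL descriptor as a no-op. A minimal sketch of that optional-GPIO idiom (property name copied from the probe hunk further down):

    struct gpio_desc *desc;

    desc = devm_gpiod_get_optional(dev, "realtek,reset", GPIOD_OUT_HIGH);
    if (IS_ERR(desc))
            return PTR_ERR(desc);           /* a real lookup/request error */

    /* desc may legitimately be NULL here; the call below is then a no-op */
    gpiod_set_value_cansleep(desc, 0);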
@@ -4863,7 +4858,7 @@ static int rt5677_write(void *context, unsigned int reg, unsigned int val)
4863#define RT5677_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ 4858#define RT5677_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
4864 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8) 4859 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
4865 4860
4866static struct snd_soc_dai_ops rt5677_aif_dai_ops = { 4861static const struct snd_soc_dai_ops rt5677_aif_dai_ops = {
4867 .hw_params = rt5677_hw_params, 4862 .hw_params = rt5677_hw_params,
4868 .set_fmt = rt5677_set_dai_fmt, 4863 .set_fmt = rt5677_set_dai_fmt,
4869 .set_sysclk = rt5677_set_dai_sysclk, 4864 .set_sysclk = rt5677_set_dai_sysclk,
@@ -5024,45 +5019,29 @@ static const struct i2c_device_id rt5677_i2c_id[] = {
5024}; 5019};
5025MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id); 5020MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
5026 5021
5027static int rt5677_parse_dt(struct rt5677_priv *rt5677, struct device_node *np) 5022static void rt5677_read_device_properties(struct rt5677_priv *rt5677,
5023 struct device *dev)
5028{ 5024{
5029 rt5677->pdata.in1_diff = of_property_read_bool(np, 5025 rt5677->pdata.in1_diff = device_property_read_bool(dev,
5030 "realtek,in1-differential"); 5026 "realtek,in1-differential");
5031 rt5677->pdata.in2_diff = of_property_read_bool(np, 5027 rt5677->pdata.in2_diff = device_property_read_bool(dev,
5032 "realtek,in2-differential"); 5028 "realtek,in2-differential");
5033 rt5677->pdata.lout1_diff = of_property_read_bool(np, 5029 rt5677->pdata.lout1_diff = device_property_read_bool(dev,
5034 "realtek,lout1-differential"); 5030 "realtek,lout1-differential");
5035 rt5677->pdata.lout2_diff = of_property_read_bool(np, 5031 rt5677->pdata.lout2_diff = device_property_read_bool(dev,
5036 "realtek,lout2-differential"); 5032 "realtek,lout2-differential");
5037 rt5677->pdata.lout3_diff = of_property_read_bool(np, 5033 rt5677->pdata.lout3_diff = device_property_read_bool(dev,
5038 "realtek,lout3-differential"); 5034 "realtek,lout3-differential");
5039 5035
5040 rt5677->pow_ldo2 = of_get_named_gpio(np, 5036 device_property_read_u8_array(dev, "realtek,gpio-config",
5041 "realtek,pow-ldo2-gpio", 0); 5037 rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
5042 rt5677->reset_pin = of_get_named_gpio(np, 5038
5043 "realtek,reset-gpio", 0); 5039 device_property_read_u32(dev, "realtek,jd1-gpio",
5044 5040 &rt5677->pdata.jd1_gpio);
5045 /* 5041 device_property_read_u32(dev, "realtek,jd2-gpio",
5046 * POW_LDO2 is optional (it may be statically tied on the board). 5042 &rt5677->pdata.jd2_gpio);
5047 * -ENOENT means that the property doesn't exist, i.e. there is no 5043 device_property_read_u32(dev, "realtek,jd3-gpio",
5048 * GPIO, so is not an error. Any other error code means the property 5044 &rt5677->pdata.jd3_gpio);
5049 * exists, but could not be parsed.
5050 */
5051 if (!gpio_is_valid(rt5677->pow_ldo2) &&
5052 (rt5677->pow_ldo2 != -ENOENT))
5053 return rt5677->pow_ldo2;
5054 if (!gpio_is_valid(rt5677->reset_pin) &&
5055 (rt5677->reset_pin != -ENOENT))
5056 return rt5677->reset_pin;
5057
5058 of_property_read_u8_array(np, "realtek,gpio-config",
5059 rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
5060
5061 of_property_read_u32(np, "realtek,jd1-gpio", &rt5677->pdata.jd1_gpio);
5062 of_property_read_u32(np, "realtek,jd2-gpio", &rt5677->pdata.jd2_gpio);
5063 of_property_read_u32(np, "realtek,jd3-gpio", &rt5677->pdata.jd3_gpio);
5064
5065 return 0;
5066} 5045}
5067 5046
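Replacing the of_*() helpers with device_property_*() lets the same parser work whether the properties come from a DT node or from ACPI _DSD data, which is why the of_node check in probe can go away. A small sketch using property names from the hunk above:

    bool in1_diff;
    u32 jd1 = 0;    /* pre-set a default: the u32 read leaves it untouched on failure */

    in1_diff = device_property_read_bool(dev, "realtek,in1-differential");
    device_property_read_u32(dev, "realtek,jd1-gpio", &jd1);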
5068static struct regmap_irq rt5677_irqs[] = { 5047static struct regmap_irq rt5677_irqs[] = {
@@ -5145,43 +5124,29 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
5145 5124
5146 if (pdata) 5125 if (pdata)
5147 rt5677->pdata = *pdata; 5126 rt5677->pdata = *pdata;
5127 else
5128 rt5677_read_device_properties(rt5677, &i2c->dev);
5148 5129
5149 if (i2c->dev.of_node) { 5130 /* pow-ldo2 and reset are optional. The codec pins may be statically
5150 ret = rt5677_parse_dt(rt5677, i2c->dev.of_node); 5131 * connected on the board without gpios. If the gpio device property
5151 if (ret) { 5132 * isn't specified, devm_gpiod_get_optional returns NULL.
5152 dev_err(&i2c->dev, "Failed to parse device tree: %d\n", 5133 */
5153 ret); 5134 rt5677->pow_ldo2 = devm_gpiod_get_optional(&i2c->dev,
5154 return ret; 5135 "realtek,pow-ldo2", GPIOD_OUT_HIGH);
5155 } 5136 if (IS_ERR(rt5677->pow_ldo2)) {
5156 } else { 5137 ret = PTR_ERR(rt5677->pow_ldo2);
5157 rt5677->pow_ldo2 = -EINVAL; 5138 dev_err(&i2c->dev, "Failed to request POW_LDO2: %d\n", ret);
5158 rt5677->reset_pin = -EINVAL; 5139 return ret;
5159 }
5160
5161 if (gpio_is_valid(rt5677->pow_ldo2)) {
5162 ret = devm_gpio_request_one(&i2c->dev, rt5677->pow_ldo2,
5163 GPIOF_OUT_INIT_HIGH,
5164 "RT5677 POW_LDO2");
5165 if (ret < 0) {
5166 dev_err(&i2c->dev, "Failed to request POW_LDO2 %d: %d\n",
5167 rt5677->pow_ldo2, ret);
5168 return ret;
5169 }
5170 } 5140 }
5171 5141 rt5677->reset_pin = devm_gpiod_get_optional(&i2c->dev,
5172 if (gpio_is_valid(rt5677->reset_pin)) { 5142 "realtek,reset", GPIOD_OUT_HIGH);
5173 ret = devm_gpio_request_one(&i2c->dev, rt5677->reset_pin, 5143 if (IS_ERR(rt5677->reset_pin)) {
5174 GPIOF_OUT_INIT_HIGH, 5144 ret = PTR_ERR(rt5677->reset_pin);
5175 "RT5677 RESET"); 5145 dev_err(&i2c->dev, "Failed to request RESET: %d\n", ret);
5176 if (ret < 0) { 5146 return ret;
5177 dev_err(&i2c->dev, "Failed to request RESET %d: %d\n",
5178 rt5677->reset_pin, ret);
5179 return ret;
5180 }
5181 } 5147 }
5182 5148
5183 if (gpio_is_valid(rt5677->pow_ldo2) || 5149 if (rt5677->pow_ldo2 || rt5677->reset_pin) {
5184 gpio_is_valid(rt5677->reset_pin)) {
5185 /* Wait a while until I2C bus becomes available. The datasheet 5150 /* Wait a while until I2C bus becomes available. The datasheet
5186 		 * does not specify the exact time we should wait but the startup 5151 		 * does not specify the exact time we should wait but the startup
5187 		 * sequence mentions at least a few milliseconds. 5152 		 * sequence mentions at least a few milliseconds.
@@ -5209,7 +5174,7 @@ static int rt5677_i2c_probe(struct i2c_client *i2c,
5209 regmap_read(rt5677->regmap, RT5677_VENDOR_ID2, &val); 5174 regmap_read(rt5677->regmap, RT5677_VENDOR_ID2, &val);
5210 if (val != RT5677_DEVICE_ID) { 5175 if (val != RT5677_DEVICE_ID) {
5211 dev_err(&i2c->dev, 5176 dev_err(&i2c->dev,
5212 "Device with ID register %x is not rt5677\n", val); 5177 "Device with ID register %#x is not rt5677\n", val);
5213 return -ENODEV; 5178 return -ENODEV;
5214 } 5179 }
5215 5180
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 7eca38a23255..d46855a42c40 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -14,6 +14,7 @@
14 14
15#include <sound/rt5677.h> 15#include <sound/rt5677.h>
16#include <linux/gpio/driver.h> 16#include <linux/gpio/driver.h>
17#include <linux/gpio/consumer.h>
17 18
18/* Info */ 19/* Info */
19#define RT5677_RESET 0x00 20#define RT5677_RESET 0x00
@@ -1775,8 +1776,8 @@ struct rt5677_priv {
1775 int pll_src; 1776 int pll_src;
1776 int pll_in; 1777 int pll_in;
1777 int pll_out; 1778 int pll_out;
1778 int pow_ldo2; /* POW_LDO2 pin */ 1779 struct gpio_desc *pow_ldo2; /* POW_LDO2 pin */
1779 int reset_pin; /* RESET pin */ 1780 struct gpio_desc *reset_pin; /* RESET pin */
1780 enum rt5677_type type; 1781 enum rt5677_type type;
1781#ifdef CONFIG_GPIOLIB 1782#ifdef CONFIG_GPIOLIB
1782 struct gpio_chip gpio_chip; 1783 struct gpio_chip gpio_chip;
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index bd7a344bf8c5..1c317de26176 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
275#define SGTL5000_BIAS_CTRL_MASK 0x000e 275#define SGTL5000_BIAS_CTRL_MASK 0x000e
276#define SGTL5000_BIAS_CTRL_SHIFT 1 276#define SGTL5000_BIAS_CTRL_SHIFT 1
277#define SGTL5000_BIAS_CTRL_WIDTH 3 277#define SGTL5000_BIAS_CTRL_WIDTH 3
278#define SGTL5000_SMALL_POP 0 278#define SGTL5000_SMALL_POP 1
279 279
280/* 280/*
281 * SGTL5000_CHIP_MIC_CTRL 281 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 3e72964280c6..a8402d0af0ea 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -208,7 +208,7 @@ out:
208 return err; 208 return err;
209} 209}
210 210
211static struct snd_soc_dai_ops si476x_dai_ops = { 211static const struct snd_soc_dai_ops si476x_dai_ops = {
212 .hw_params = si476x_codec_hw_params, 212 .hw_params = si476x_codec_hw_params,
213 .set_fmt = si476x_codec_set_dai_fmt, 213 .set_fmt = si476x_codec_set_dai_fmt,
214}; 214};
diff --git a/sound/soc/codecs/sirf-audio-codec.c b/sound/soc/codecs/sirf-audio-codec.c
index 29cb44256044..6bfd25c289d1 100644
--- a/sound/soc/codecs/sirf-audio-codec.c
+++ b/sound/soc/codecs/sirf-audio-codec.c
@@ -370,11 +370,11 @@ static int sirf_audio_codec_trigger(struct snd_pcm_substream *substream,
370 return 0; 370 return 0;
371} 371}
372 372
373struct snd_soc_dai_ops sirf_audio_codec_dai_ops = { 373static const struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
374 .trigger = sirf_audio_codec_trigger, 374 .trigger = sirf_audio_codec_trigger,
375}; 375};
376 376
377struct snd_soc_dai_driver sirf_audio_codec_dai = { 377static struct snd_soc_dai_driver sirf_audio_codec_dai = {
378 .name = "sirf-audio-codec", 378 .name = "sirf-audio-codec",
379 .playback = { 379 .playback = {
380 .stream_name = "Playback", 380 .stream_name = "Playback",
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index f3f1f6874c72..e619d5651b09 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -10,6 +10,7 @@
10 * Licensed under the GPL-2. 10 * Licensed under the GPL-2.
11 */ 11 */
12 12
13#include <linux/acpi.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/i2c.h> 16#include <linux/i2c.h>
@@ -173,6 +174,12 @@ static const struct snd_soc_dapm_widget ssm4567_dapm_widgets[] = {
173 SND_SOC_DAPM_SWITCH("Amplifier Boost", SSM4567_REG_POWER_CTRL, 3, 1, 174 SND_SOC_DAPM_SWITCH("Amplifier Boost", SSM4567_REG_POWER_CTRL, 3, 1,
174 &ssm4567_amplifier_boost_control), 175 &ssm4567_amplifier_boost_control),
175 176
177 SND_SOC_DAPM_SIGGEN("Sense"),
178
179 SND_SOC_DAPM_PGA("Current Sense", SSM4567_REG_POWER_CTRL, 4, 1, NULL, 0),
180 SND_SOC_DAPM_PGA("Voltage Sense", SSM4567_REG_POWER_CTRL, 5, 1, NULL, 0),
181 SND_SOC_DAPM_PGA("VBAT Sense", SSM4567_REG_POWER_CTRL, 6, 1, NULL, 0),
182
176 SND_SOC_DAPM_OUTPUT("OUT"), 183 SND_SOC_DAPM_OUTPUT("OUT"),
177}; 184};
178 185
@@ -180,6 +187,13 @@ static const struct snd_soc_dapm_route ssm4567_routes[] = {
180 { "OUT", NULL, "Amplifier Boost" }, 187 { "OUT", NULL, "Amplifier Boost" },
181 { "Amplifier Boost", "Switch", "DAC" }, 188 { "Amplifier Boost", "Switch", "DAC" },
182 { "OUT", NULL, "DAC" }, 189 { "OUT", NULL, "DAC" },
190
191 { "Current Sense", NULL, "Sense" },
192 { "Voltage Sense", NULL, "Sense" },
193 { "VBAT Sense", NULL, "Sense" },
194 { "Capture Sense", NULL, "Current Sense" },
195 { "Capture Sense", NULL, "Voltage Sense" },
196 { "Capture Sense", NULL, "VBAT Sense" },
183}; 197};
184 198
185static int ssm4567_hw_params(struct snd_pcm_substream *substream, 199static int ssm4567_hw_params(struct snd_pcm_substream *substream,
@@ -315,7 +329,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
315 if (invert_fclk) 329 if (invert_fclk)
316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC; 330 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
317 331
318 return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1); 332 return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
333 SSM4567_SAI_CTRL_1_BCLK |
334 SSM4567_SAI_CTRL_1_FSYNC |
335 SSM4567_SAI_CTRL_1_LJ |
336 SSM4567_SAI_CTRL_1_TDM |
337 SSM4567_SAI_CTRL_1_PDM,
338 ctrl1);
319} 339}
320 340
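Switching from regmap_write() to regmap_update_bits() matters here because SAI_CTRL_1 also holds configuration bits that this path does not own: update_bits performs a read-modify-write and only changes the bits named in the mask. Schematically (a sketch, not driver code):

    /* regmap_update_bits(map, reg, mask, val) behaves like:
     *      old = read(reg);
     *      new = (old & ~mask) | (val & mask);
     *      if (new != old)
     *              write(reg, new);
     * so bits outside 'mask' keep whatever value they already had.
     */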
321static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable) 341static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
@@ -381,6 +401,14 @@ static struct snd_soc_dai_driver ssm4567_dai = {
381 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | 401 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
382 SNDRV_PCM_FMTBIT_S32, 402 SNDRV_PCM_FMTBIT_S32,
383 }, 403 },
404 .capture = {
405 .stream_name = "Capture Sense",
406 .channels_min = 1,
407 .channels_max = 1,
408 .rates = SNDRV_PCM_RATE_8000_192000,
409 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
410 SNDRV_PCM_FMTBIT_S32,
411 },
384 .ops = &ssm4567_dai_ops, 412 .ops = &ssm4567_dai_ops,
385}; 413};
386 414
@@ -450,9 +478,20 @@ static const struct i2c_device_id ssm4567_i2c_ids[] = {
450}; 478};
451MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids); 479MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids);
452 480
481#ifdef CONFIG_ACPI
482
483static const struct acpi_device_id ssm4567_acpi_match[] = {
484 { "INT343B", 0 },
485 {},
486};
487MODULE_DEVICE_TABLE(acpi, ssm4567_acpi_match);
488
489#endif
490
453static struct i2c_driver ssm4567_driver = { 491static struct i2c_driver ssm4567_driver = {
454 .driver = { 492 .driver = {
455 .name = "ssm4567", 493 .name = "ssm4567",
494 .acpi_match_table = ACPI_PTR(ssm4567_acpi_match),
456 }, 495 },
457 .probe = ssm4567_i2c_probe, 496 .probe = ssm4567_i2c_probe,
458 .remove = ssm4567_i2c_remove, 497 .remove = ssm4567_i2c_remove,
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 3430f444c1ae..2cdaca943a8c 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -339,9 +339,6 @@ static int sta529_i2c_probe(struct i2c_client *i2c,
339 struct sta529 *sta529; 339 struct sta529 *sta529;
340 int ret; 340 int ret;
341 341
342 if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
343 return -EINVAL;
344
345 sta529 = devm_kzalloc(&i2c->dev, sizeof(struct sta529), GFP_KERNEL); 342 sta529 = devm_kzalloc(&i2c->dev, sizeof(struct sta529), GFP_KERNEL);
346 if (!sta529) 343 if (!sta529)
347 return -ENOMEM; 344 return -ENOMEM;
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index ed4cca7f6779..0945c51df003 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -28,6 +28,9 @@
28 28
29#include "stac9766.h" 29#include "stac9766.h"
30 30
31#define STAC9766_VENDOR_ID 0x83847666
32#define STAC9766_VENDOR_ID_MASK 0xffffffff
33
31/* 34/*
32 * STAC9766 register cache 35 * STAC9766 register cache
33 */ 36 */
@@ -239,45 +242,12 @@ static int stac9766_set_bias_level(struct snd_soc_codec *codec,
239 return 0; 242 return 0;
240} 243}
241 244
242static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
243{
244 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
245
246 if (try_warm && soc_ac97_ops->warm_reset) {
247 soc_ac97_ops->warm_reset(ac97);
248 if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
249 return 1;
250 }
251
252 soc_ac97_ops->reset(ac97);
253 if (soc_ac97_ops->warm_reset)
254 soc_ac97_ops->warm_reset(ac97);
255 if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
256 return -EIO;
257 return 0;
258}
259
260static int stac9766_codec_resume(struct snd_soc_codec *codec) 245static int stac9766_codec_resume(struct snd_soc_codec *codec)
261{ 246{
262 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec); 247 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
263 u16 id, reset;
264 248
265 reset = 0; 249 return snd_ac97_reset(ac97, true, STAC9766_VENDOR_ID,
266 /* give the codec an AC97 warm reset to start the link */ 250 STAC9766_VENDOR_ID_MASK);
267reset:
268 if (reset > 5) {
269 dev_err(codec->dev, "Failed to resume\n");
270 return -EIO;
271 }
272 ac97->bus->ops->warm_reset(ac97);
273 id = soc_ac97_ops->read(ac97, AC97_VENDOR_ID2);
274 if (id != 0x4c13) {
275 stac9766_reset(codec, 0);
276 reset++;
277 goto reset;
278 }
279
280 return 0;
281} 251}
282 252
283static const struct snd_soc_dai_ops stac9766_dai_ops_analog = { 253static const struct snd_soc_dai_ops stac9766_dai_ops_analog = {
@@ -330,28 +300,15 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
330static int stac9766_codec_probe(struct snd_soc_codec *codec) 300static int stac9766_codec_probe(struct snd_soc_codec *codec)
331{ 301{
332 struct snd_ac97 *ac97; 302 struct snd_ac97 *ac97;
333 int ret = 0;
334 303
335 ac97 = snd_soc_new_ac97_codec(codec); 304 ac97 = snd_soc_new_ac97_codec(codec, STAC9766_VENDOR_ID,
305 STAC9766_VENDOR_ID_MASK);
336 if (IS_ERR(ac97)) 306 if (IS_ERR(ac97))
337 return PTR_ERR(ac97); 307 return PTR_ERR(ac97);
338 308
339 snd_soc_codec_set_drvdata(codec, ac97); 309 snd_soc_codec_set_drvdata(codec, ac97);
340 310
341 /* do a cold reset for the controller and then try
342 * a warm reset followed by an optional cold reset for codec */
343 stac9766_reset(codec, 0);
344 ret = stac9766_reset(codec, 1);
345 if (ret < 0) {
346 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
347 goto codec_err;
348 }
349
350 return 0; 311 return 0;
351
352codec_err:
353 snd_soc_free_ac97_codec(ac97);
354 return ret;
355} 312}
356 313
357static int stac9766_codec_remove(struct snd_soc_codec *codec) 314static int stac9766_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
new file mode 100644
index 000000000000..160d61a66204
--- /dev/null
+++ b/sound/soc/codecs/sti-sas.c
@@ -0,0 +1,628 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2015
3 * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
4 * for STMicroelectronics.
5 * License terms: GNU General Public License (GPL), version 2
6 */
7
8#include <linux/io.h>
9#include <linux/module.h>
10#include <linux/regmap.h>
11#include <linux/reset.h>
12#include <linux/mfd/syscon.h>
13
14#include <sound/soc.h>
15#include <sound/soc-dapm.h>
16
17/* chipID supported */
18#define CHIPID_STIH416 0
19#define CHIPID_STIH407 1
20
21/* DAC definitions */
22
23/* stih416 DAC registers */
24/* sysconf 2517: Audio-DAC-Control */
25#define STIH416_AUDIO_DAC_CTRL 0x00000814
26/* sysconf 2519: Audio-Glue-Control */
27#define STIH416_AUDIO_GLUE_CTRL 0x0000081C
28
29#define STIH416_DAC_NOT_STANDBY 0x3
30#define STIH416_DAC_SOFTMUTE 0x4
31#define STIH416_DAC_ANA_NOT_PWR 0x5
32#define STIH416_DAC_NOT_PNDBG 0x6
33
34#define STIH416_DAC_NOT_STANDBY_MASK BIT(STIH416_DAC_NOT_STANDBY)
35#define STIH416_DAC_SOFTMUTE_MASK BIT(STIH416_DAC_SOFTMUTE)
36#define STIH416_DAC_ANA_NOT_PWR_MASK BIT(STIH416_DAC_ANA_NOT_PWR)
37#define STIH416_DAC_NOT_PNDBG_MASK BIT(STIH416_DAC_NOT_PNDBG)
38
39/* stih407 DAC registers */
40/* sysconf 5041: Audio-Glue-Control */
41#define STIH407_AUDIO_GLUE_CTRL 0x000000A4
42/* sysconf 5042: Audio-DAC-Control */
43#define STIH407_AUDIO_DAC_CTRL 0x000000A8
44
45/* DAC definitions */
46#define STIH407_DAC_SOFTMUTE 0x0
47#define STIH407_DAC_STANDBY_ANA 0x1
48#define STIH407_DAC_STANDBY 0x2
49
50#define STIH407_DAC_SOFTMUTE_MASK BIT(STIH407_DAC_SOFTMUTE)
51#define STIH407_DAC_STANDBY_ANA_MASK BIT(STIH407_DAC_STANDBY_ANA)
52#define STIH407_DAC_STANDBY_MASK BIT(STIH407_DAC_STANDBY)
53
54/* SPDIF definitions */
55#define SPDIF_BIPHASE_ENABLE 0x6
56#define SPDIF_BIPHASE_IDLE 0x7
57
58#define SPDIF_BIPHASE_ENABLE_MASK BIT(SPDIF_BIPHASE_ENABLE)
59#define SPDIF_BIPHASE_IDLE_MASK BIT(SPDIF_BIPHASE_IDLE)
60
61enum {
62 STI_SAS_DAI_SPDIF_OUT,
63 STI_SAS_DAI_ANALOG_OUT,
64};
65
66static const struct reg_default stih416_sas_reg_defaults[] = {
67 { STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
68 { STIH407_AUDIO_DAC_CTRL, 0x000000000 },
69};
70
71static const struct reg_default stih407_sas_reg_defaults[] = {
72 { STIH416_AUDIO_DAC_CTRL, 0x000000000 },
73 { STIH416_AUDIO_GLUE_CTRL, 0x00000040 },
74};
75
76struct sti_dac_audio {
77 struct regmap *regmap;
78 struct regmap *virt_regmap;
79 struct regmap_field **field;
80 struct reset_control *rst;
81 int mclk;
82};
83
84struct sti_spdif_audio {
85 struct regmap *regmap;
86 struct regmap_field **field;
87 int mclk;
88};
89
90/* device data structure */
91struct sti_sas_dev_data {
92 const int chipid; /* IC version */
93 const struct regmap_config *regmap;
94 const struct snd_soc_dai_ops *dac_ops; /* DAC function callbacks */
95 const struct snd_soc_dapm_widget *dapm_widgets; /* dapms declaration */
96 const int num_dapm_widgets; /* dapms declaration */
97 const struct snd_soc_dapm_route *dapm_routes; /* route declaration */
98 const int num_dapm_routes; /* route declaration */
99};
100
101/* driver data structure */
102struct sti_sas_data {
103 struct device *dev;
104 const struct sti_sas_dev_data *dev_data;
105 struct sti_dac_audio dac;
106 struct sti_spdif_audio spdif;
107};
108
109/* Read a register from the sysconf reg bank */
110static int sti_sas_read_reg(void *context, unsigned int reg,
111 unsigned int *value)
112{
113 struct sti_sas_data *drvdata = context;
114 int status;
115 u32 val;
116
117 status = regmap_read(drvdata->dac.regmap, reg, &val);
118 *value = (unsigned int)val;
119
120 return status;
121}
122
123/* Read a register from the sysconf reg bank */
124static int sti_sas_write_reg(void *context, unsigned int reg,
125 unsigned int value)
126{
127 struct sti_sas_data *drvdata = context;
128 int status;
129
130 status = regmap_write(drvdata->dac.regmap, reg, value);
131
132 return status;
133}
134
135static int sti_sas_init_sas_registers(struct snd_soc_codec *codec,
136 struct sti_sas_data *data)
137{
138 int ret;
139 /*
140 * DAC and SPDIF are activated by default
141 * put them in IDLE to save power
142 */
143
144 /* Initialise bi-phase formatter to disabled */
145 ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
146 SPDIF_BIPHASE_ENABLE_MASK, 0);
147
148 if (!ret)
149 /* Initialise bi-phase formatter idle value to 0 */
150 ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
151 SPDIF_BIPHASE_IDLE_MASK, 0);
152 if (ret < 0) {
153 dev_err(codec->dev, "Failed to update SPDIF registers");
154 return ret;
155 }
156
157 /* Init DAC configuration */
158 switch (data->dev_data->chipid) {
159 case CHIPID_STIH407:
160 /* init configuration */
161 ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
162 STIH407_DAC_STANDBY_MASK,
163 STIH407_DAC_STANDBY_MASK);
164
165 if (!ret)
166 ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
167 STIH407_DAC_STANDBY_ANA_MASK,
168 STIH407_DAC_STANDBY_ANA_MASK);
169 if (!ret)
170 ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
171 STIH407_DAC_SOFTMUTE_MASK,
172 STIH407_DAC_SOFTMUTE_MASK);
173 break;
174 case CHIPID_STIH416:
175 ret = snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
176 STIH416_DAC_NOT_STANDBY_MASK, 0);
177 if (!ret)
178 ret = snd_soc_update_bits(codec,
179 STIH416_AUDIO_DAC_CTRL,
180 STIH416_DAC_ANA_NOT_PWR, 0);
181 if (!ret)
182 ret = snd_soc_update_bits(codec,
183 STIH416_AUDIO_DAC_CTRL,
184 STIH416_DAC_NOT_PNDBG_MASK,
185 0);
186 if (!ret)
187 ret = snd_soc_update_bits(codec,
188 STIH416_AUDIO_DAC_CTRL,
189 STIH416_DAC_SOFTMUTE_MASK,
190 STIH416_DAC_SOFTMUTE_MASK);
191 break;
192 default:
193 return -EINVAL;
194 }
195
196 if (ret < 0) {
197 dev_err(codec->dev, "Failed to update DAC registers");
198 return ret;
199 }
200
201 return ret;
202}
203
204/*
205 * DAC
206 */
207static int sti_sas_dac_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
208{
209 /* Sanity check only */
210 if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
211 dev_err(dai->codec->dev,
212 			"%s: ERROR: Unsupported master mask 0x%x\n",
213 __func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
214 return -EINVAL;
215 }
216
217 return 0;
218}
219
220static int stih416_dac_probe(struct snd_soc_dai *dai)
221{
222 struct snd_soc_codec *codec = dai->codec;
223 struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
224 struct sti_dac_audio *dac = &drvdata->dac;
225
226 /* Get reset control */
227 dac->rst = devm_reset_control_get(codec->dev, "dac_rst");
228 if (IS_ERR(dac->rst)) {
229 dev_err(dai->codec->dev,
230 "%s: ERROR: DAC reset control not defined !\n",
231 __func__);
232 dac->rst = NULL;
233 return -EFAULT;
234 }
235 /* Put the DAC into reset */
236 reset_control_assert(dac->rst);
237
238 return 0;
239}
240
241static const struct snd_soc_dapm_widget stih416_sas_dapm_widgets[] = {
242 SND_SOC_DAPM_PGA("DAC bandgap", STIH416_AUDIO_DAC_CTRL,
243 STIH416_DAC_NOT_PNDBG_MASK, 0, NULL, 0),
244 SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH416_AUDIO_DAC_CTRL,
245 STIH416_DAC_ANA_NOT_PWR, 0, NULL, 0),
246 SND_SOC_DAPM_DAC("DAC standby", "dac_p", STIH416_AUDIO_DAC_CTRL,
247 STIH416_DAC_NOT_STANDBY, 0),
248 SND_SOC_DAPM_OUTPUT("DAC Output"),
249};
250
251static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
252 SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH407_AUDIO_DAC_CTRL,
253 STIH407_DAC_STANDBY_ANA, 1, NULL, 0),
254 SND_SOC_DAPM_DAC("DAC standby", "dac_p", STIH407_AUDIO_DAC_CTRL,
255 STIH407_DAC_STANDBY, 1),
256 SND_SOC_DAPM_OUTPUT("DAC Output"),
257};
258
259static const struct snd_soc_dapm_route stih416_sas_route[] = {
260 {"DAC Output", NULL, "DAC bandgap"},
261 {"DAC Output", NULL, "DAC standby ana"},
262 {"DAC standby ana", NULL, "DAC standby"},
263};
264
265static const struct snd_soc_dapm_route stih407_sas_route[] = {
266 {"DAC Output", NULL, "DAC standby ana"},
267 {"DAC standby ana", NULL, "DAC standby"},
268};
269
270static int stih416_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
271{
272 struct snd_soc_codec *codec = dai->codec;
273
274 if (mute) {
275 return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
276 STIH416_DAC_SOFTMUTE_MASK,
277 STIH416_DAC_SOFTMUTE_MASK);
278 } else {
279 return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
280 STIH416_DAC_SOFTMUTE_MASK, 0);
281 }
282}
283
284static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
285{
286 struct snd_soc_codec *codec = dai->codec;
287
288 if (mute) {
289 return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
290 STIH407_DAC_SOFTMUTE_MASK,
291 STIH407_DAC_SOFTMUTE_MASK);
292 } else {
293 return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
294 STIH407_DAC_SOFTMUTE_MASK,
295 0);
296 }
297}
298
299/*
300 * SPDIF
301 */
302static int sti_sas_spdif_set_fmt(struct snd_soc_dai *dai,
303 unsigned int fmt)
304{
305 if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
306 dev_err(dai->codec->dev,
307 			"%s: ERROR: Unsupported master mask 0x%x\n",
308 __func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
309 return -EINVAL;
310 }
311
312 return 0;
313}
314
315/*
316 * sti_sas_spdif_trigger:
317 * The trigger callback ensures that the BiPhase Formatter (BPF) is
318 * disabled before the CPU DAI is stopped.
319 * This is mandatory to avoid stalling the BPF.
320 */
321static int sti_sas_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
322 struct snd_soc_dai *dai)
323{
324 struct snd_soc_codec *codec = dai->codec;
325
326 switch (cmd) {
327 case SNDRV_PCM_TRIGGER_START:
328 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
329 return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
330 SPDIF_BIPHASE_ENABLE_MASK,
331 SPDIF_BIPHASE_ENABLE_MASK);
332 case SNDRV_PCM_TRIGGER_RESUME:
333 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
334 case SNDRV_PCM_TRIGGER_STOP:
335 case SNDRV_PCM_TRIGGER_SUSPEND:
336 return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
337 SPDIF_BIPHASE_ENABLE_MASK,
338 0);
339 default:
340 return -EINVAL;
341 }
342}
343
344static bool sti_sas_volatile_register(struct device *dev, unsigned int reg)
345{
346 if (reg == STIH407_AUDIO_GLUE_CTRL)
347 return true;
348
349 return false;
350}
351
352/*
353 * CODEC DAIS
354 */
355
356/*
357 * sti_sas_set_sysclk:
358 * Record the MCLK input frequency so the MCLK/FS ratio can be checked later.
359 */
360static int sti_sas_set_sysclk(struct snd_soc_dai *dai, int clk_id,
361 unsigned int freq, int dir)
362{
363 struct snd_soc_codec *codec = dai->codec;
364 struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
365
366 if (dir == SND_SOC_CLOCK_OUT)
367 return 0;
368
369 if (clk_id != 0)
370 return -EINVAL;
371
372 switch (dai->id) {
373 case STI_SAS_DAI_SPDIF_OUT:
374 drvdata->spdif.mclk = freq;
375 break;
376
377 case STI_SAS_DAI_ANALOG_OUT:
378 drvdata->dac.mclk = freq;
379 break;
380 }
381
382 return 0;
383}
384
385static int sti_sas_prepare(struct snd_pcm_substream *substream,
386 struct snd_soc_dai *dai)
387{
388 struct snd_soc_codec *codec = dai->codec;
389 struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
390 struct snd_pcm_runtime *runtime = substream->runtime;
391
392 switch (dai->id) {
393 case STI_SAS_DAI_SPDIF_OUT:
394 if ((drvdata->spdif.mclk / runtime->rate) != 128) {
395			dev_err(codec->dev, "unexpected mclk-fs ratio\n");
396 return -EINVAL;
397 }
398 break;
399 case STI_SAS_DAI_ANALOG_OUT:
400 if ((drvdata->dac.mclk / runtime->rate) != 256) {
401			dev_err(codec->dev, "unexpected mclk-fs ratio\n");
402 return -EINVAL;
403 }
404 break;
405 }
406
407 return 0;
408}
409
410static const struct snd_soc_dai_ops stih416_dac_ops = {
411 .set_fmt = sti_sas_dac_set_fmt,
412 .mute_stream = stih416_sas_dac_mute,
413 .prepare = sti_sas_prepare,
414 .set_sysclk = sti_sas_set_sysclk,
415};
416
417static const struct snd_soc_dai_ops stih407_dac_ops = {
418 .set_fmt = sti_sas_dac_set_fmt,
419 .mute_stream = stih407_sas_dac_mute,
420 .prepare = sti_sas_prepare,
421 .set_sysclk = sti_sas_set_sysclk,
422};
423
424static const struct regmap_config stih407_sas_regmap = {
425 .reg_bits = 32,
426 .val_bits = 32,
427
428 .max_register = STIH407_AUDIO_DAC_CTRL,
429 .reg_defaults = stih407_sas_reg_defaults,
430 .num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
431 .volatile_reg = sti_sas_volatile_register,
432 .cache_type = REGCACHE_RBTREE,
433 .reg_read = sti_sas_read_reg,
434 .reg_write = sti_sas_write_reg,
435};
436
437static const struct regmap_config stih416_sas_regmap = {
438 .reg_bits = 32,
439 .val_bits = 32,
440
441 .max_register = STIH416_AUDIO_DAC_CTRL,
442 .reg_defaults = stih416_sas_reg_defaults,
443 .num_reg_defaults = ARRAY_SIZE(stih416_sas_reg_defaults),
444 .volatile_reg = sti_sas_volatile_register,
445 .cache_type = REGCACHE_RBTREE,
446 .reg_read = sti_sas_read_reg,
447 .reg_write = sti_sas_write_reg,
448};
449
450static const struct sti_sas_dev_data stih416_data = {
451 .chipid = CHIPID_STIH416,
452 .regmap = &stih416_sas_regmap,
453 .dac_ops = &stih416_dac_ops,
454 .dapm_widgets = stih416_sas_dapm_widgets,
455 .num_dapm_widgets = ARRAY_SIZE(stih416_sas_dapm_widgets),
456 .dapm_routes = stih416_sas_route,
457 .num_dapm_routes = ARRAY_SIZE(stih416_sas_route),
458};
459
460static const struct sti_sas_dev_data stih407_data = {
461 .chipid = CHIPID_STIH407,
462 .regmap = &stih407_sas_regmap,
463 .dac_ops = &stih407_dac_ops,
464 .dapm_widgets = stih407_sas_dapm_widgets,
465 .num_dapm_widgets = ARRAY_SIZE(stih407_sas_dapm_widgets),
466 .dapm_routes = stih407_sas_route,
467 .num_dapm_routes = ARRAY_SIZE(stih407_sas_route),
468};
469
470static struct snd_soc_dai_driver sti_sas_dai[] = {
471 {
472 .name = "sas-dai-spdif-out",
473 .id = STI_SAS_DAI_SPDIF_OUT,
474 .playback = {
475 .stream_name = "spdif_p",
476 .channels_min = 2,
477 .channels_max = 2,
478 .rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
479 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 |
480 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
481 SNDRV_PCM_RATE_192000,
482 .formats = SNDRV_PCM_FMTBIT_S16_LE |
483 SNDRV_PCM_FMTBIT_S32_LE,
484 },
485 .ops = (struct snd_soc_dai_ops[]) {
486 {
487 .set_fmt = sti_sas_spdif_set_fmt,
488 .trigger = sti_sas_spdif_trigger,
489 .set_sysclk = sti_sas_set_sysclk,
490 .prepare = sti_sas_prepare,
491 }
492 },
493 },
494 {
495 .name = "sas-dai-dac",
496 .id = STI_SAS_DAI_ANALOG_OUT,
497 .playback = {
498 .stream_name = "dac_p",
499 .channels_min = 2,
500 .channels_max = 2,
501 .rates = SNDRV_PCM_RATE_8000_48000,
502 .formats = SNDRV_PCM_FMTBIT_S16_LE |
503 SNDRV_PCM_FMTBIT_S32_LE,
504 },
505 },
506};
507
508#ifdef CONFIG_PM_SLEEP
509static int sti_sas_resume(struct snd_soc_codec *codec)
510{
511 struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
512
513 return sti_sas_init_sas_registers(codec, drvdata);
514}
515#else
516#define sti_sas_resume NULL
517#endif
518
519static int sti_sas_codec_probe(struct snd_soc_codec *codec)
520{
521 struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
522 int ret;
523
524 ret = sti_sas_init_sas_registers(codec, drvdata);
525
526 return ret;
527}
528
529static struct snd_soc_codec_driver sti_sas_driver = {
530 .probe = sti_sas_codec_probe,
531 .resume = sti_sas_resume,
532};
533
534static const struct of_device_id sti_sas_dev_match[] = {
535 {
536 .compatible = "st,stih416-sas-codec",
537 .data = &stih416_data,
538 },
539 {
540 .compatible = "st,stih407-sas-codec",
541 .data = &stih407_data,
542 },
543 {},
544};
545
546static int sti_sas_driver_probe(struct platform_device *pdev)
547{
548 struct device_node *pnode = pdev->dev.of_node;
549 struct sti_sas_data *drvdata;
550 const struct of_device_id *of_id;
551
552 /* Allocate device structure */
553 drvdata = devm_kzalloc(&pdev->dev, sizeof(struct sti_sas_data),
554 GFP_KERNEL);
555 if (!drvdata)
556 return -ENOMEM;
557
558 /* Populate data structure depending on compatibility */
559 of_id = of_match_node(sti_sas_dev_match, pnode);
560 if (!of_id->data) {
561		dev_err(&pdev->dev, "data associated with device is missing\n");
562 return -EINVAL;
563 }
564
565 drvdata->dev_data = (struct sti_sas_dev_data *)of_id->data;
566
567 /* Initialise device structure */
568 drvdata->dev = &pdev->dev;
569
570 /* Request the DAC & SPDIF registers memory region */
571 drvdata->dac.virt_regmap = devm_regmap_init(&pdev->dev, NULL, drvdata,
572 drvdata->dev_data->regmap);
573 if (IS_ERR(drvdata->dac.virt_regmap)) {
574 dev_err(&pdev->dev, "audio registers not enabled\n");
575 return PTR_ERR(drvdata->dac.virt_regmap);
576 }
577
578 /* Request the syscon region */
579 drvdata->dac.regmap =
580 syscon_regmap_lookup_by_phandle(pnode, "st,syscfg");
581 if (IS_ERR(drvdata->dac.regmap)) {
582 dev_err(&pdev->dev, "syscon registers not available\n");
583 return PTR_ERR(drvdata->dac.regmap);
584 }
585 drvdata->spdif.regmap = drvdata->dac.regmap;
586
587 /* Set DAC dai probe */
588 if (drvdata->dev_data->chipid == CHIPID_STIH416)
589 sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].probe = stih416_dac_probe;
590
591 sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
592
593	/* Set DAPM widgets and routes */
594 sti_sas_driver.dapm_widgets = drvdata->dev_data->dapm_widgets;
595 sti_sas_driver.num_dapm_widgets = drvdata->dev_data->num_dapm_widgets;
596
597 sti_sas_driver.dapm_routes = drvdata->dev_data->dapm_routes;
598 sti_sas_driver.num_dapm_routes = drvdata->dev_data->num_dapm_routes;
599
600 /* Store context */
601 dev_set_drvdata(&pdev->dev, drvdata);
602
603 return snd_soc_register_codec(&pdev->dev, &sti_sas_driver,
604 sti_sas_dai,
605 ARRAY_SIZE(sti_sas_dai));
606}
607
608static int sti_sas_driver_remove(struct platform_device *pdev)
609{
610 snd_soc_unregister_codec(&pdev->dev);
611
612 return 0;
613}
614
615static struct platform_driver sti_sas_platform_driver = {
616 .driver = {
617 .name = "sti-sas-codec",
618 .of_match_table = sti_sas_dev_match,
619 },
620 .probe = sti_sas_driver_probe,
621 .remove = sti_sas_driver_remove,
622};
623
624module_platform_driver(sti_sas_platform_driver);
625
626MODULE_DESCRIPTION("audio codec for STMicroelectronics sti platforms");
627MODULE_AUTHOR("Arnaud.pouliquen@st.com");
628MODULE_LICENSE("GPL v2");
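
The mclk/fs checks in sti_sas_prepare() above assume the machine driver has already reported its MCLK rate through set_sysclk(). A minimal sketch of such a machine-driver hw_params callback follows; the function name is illustrative and not part of this patch, and the factor would be 128 * fs instead of 256 * fs for the S/PDIF DAI.

#include <sound/soc.h>
#include <sound/pcm_params.h>

/* Hypothetical machine-driver callback: report MCLK = 256 * fs on the
 * sti-sas analog DAI so the ratio check in sti_sas_prepare() passes. */
static int my_card_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	unsigned int mclk = 256 * params_rate(params);

	/* clk_id 0 and SND_SOC_CLOCK_IN: sti_sas_set_sysclk() only stores
	 * the frequency, the clock itself is managed elsewhere. */
	return snd_soc_dai_set_sysclk(rtd->codec_dai, 0, mclk,
				      SND_SOC_CLOCK_IN);
}
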
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 083b6b3e6cd2..2f6a65afe5d2 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -38,7 +38,7 @@
38 38
39#include "tas2552.h" 39#include "tas2552.h"
40 40
41static struct reg_default tas2552_reg_defs[] = { 41static const struct reg_default tas2552_reg_defs[] = {
42 {TAS2552_CFG_1, 0x22}, 42 {TAS2552_CFG_1, 0x22},
43 {TAS2552_CFG_3, 0x80}, 43 {TAS2552_CFG_3, 0x80},
44 {TAS2552_DOUT, 0x00}, 44 {TAS2552_DOUT, 0x00},
@@ -520,7 +520,7 @@ static const struct dev_pm_ops tas2552_pm = {
520 NULL) 520 NULL)
521}; 521};
522 522
523static struct snd_soc_dai_ops tas2552_speaker_dai_ops = { 523static const struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
524 .hw_params = tas2552_hw_params, 524 .hw_params = tas2552_hw_params,
525 .prepare = tas2552_prepare, 525 .prepare = tas2552_prepare,
526 .set_sysclk = tas2552_set_dai_sysclk, 526 .set_sysclk = tas2552_set_dai_sysclk,
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 85bcc374c8e8..39307ad41a34 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -179,7 +179,7 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec,
179 case SND_SOC_BIAS_PREPARE: 179 case SND_SOC_BIAS_PREPARE:
180 break; 180 break;
181 case SND_SOC_BIAS_STANDBY: 181 case SND_SOC_BIAS_STANDBY:
182 if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { 182 if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
183 if (!IS_ERR(priv->mclk)) { 183 if (!IS_ERR(priv->mclk)) {
184 ret = clk_prepare_enable(priv->mclk); 184 ret = clk_prepare_enable(priv->mclk);
185 if (ret) { 185 if (ret) {
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index 86d05f01e5f9..cb5310d89c0f 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -160,7 +160,7 @@ static int tfa9879_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
160 return 0; 160 return 0;
161} 161}
162 162
163static struct reg_default tfa9879_regs[] = { 163static const struct reg_default tfa9879_regs[] = {
164 { TFA9879_DEVICE_CONTROL, 0x0000 }, /* 0x00 */ 164 { TFA9879_DEVICE_CONTROL, 0x0000 }, /* 0x00 */
165 { TFA9879_SERIAL_INTERFACE_1, 0x0a18 }, /* 0x01 */ 165 { TFA9879_SERIAL_INTERFACE_1, 0x0a18 }, /* 0x01 */
166 { TFA9879_PCM_IOM2_FORMAT_1, 0x0007 }, /* 0x02 */ 166 { TFA9879_PCM_IOM2_FORMAT_1, 0x0007 }, /* 0x02 */
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 48dd9b2cd0fa..ee4def4f819f 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1121,7 +1121,7 @@ static struct snd_soc_codec_driver soc_codec_driver_aic31xx = {
1121 .num_dapm_routes = ARRAY_SIZE(aic31xx_audio_map), 1121 .num_dapm_routes = ARRAY_SIZE(aic31xx_audio_map),
1122}; 1122};
1123 1123
1124static struct snd_soc_dai_ops aic31xx_dai_ops = { 1124static const struct snd_soc_dai_ops aic31xx_dai_ops = {
1125 .hw_params = aic31xx_hw_params, 1125 .hw_params = aic31xx_hw_params,
1126 .set_sysclk = aic31xx_set_dai_sysclk, 1126 .set_sysclk = aic31xx_set_dai_sysclk,
1127 .set_fmt = aic31xx_set_dai_fmt, 1127 .set_fmt = aic31xx_set_dai_fmt,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 125a93517cdb..1a82b19b2644 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1668,7 +1668,7 @@ static const struct i2c_device_id aic3x_i2c_id[] = {
1668}; 1668};
1669MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id); 1669MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
1670 1670
1671static const struct reg_default aic3007_class_d[] = { 1671static const struct reg_sequence aic3007_class_d[] = {
1672 /* Class-D speaker driver init; datasheet p. 46 */ 1672 /* Class-D speaker driver init; datasheet p. 46 */
1673 { AIC3X_PAGE_SELECT, 0x0D }, 1673 { AIC3X_PAGE_SELECT, 0x0D },
1674 { 0xD, 0x0D }, 1674 { 0xD, 0x0D },
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index d4fa224d68b1..35199fc1f6ca 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -166,7 +166,7 @@ static const struct wm_adsp_region wm2200_dsp2_regions[] = {
166 { .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE }, 166 { .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE },
167}; 167};
168 168
169static struct reg_default wm2200_reg_defaults[] = { 169static const struct reg_default wm2200_reg_defaults[] = {
170 { 0x000B, 0x0000 }, /* R11 - Tone Generator 1 */ 170 { 0x000B, 0x0000 }, /* R11 - Tone Generator 1 */
171 { 0x0102, 0x0000 }, /* R258 - Clocking 3 */ 171 { 0x0102, 0x0000 }, /* R258 - Clocking 3 */
172 { 0x0103, 0x0011 }, /* R259 - Clocking 4 */ 172 { 0x0103, 0x0011 }, /* R259 - Clocking 4 */
@@ -897,7 +897,7 @@ static bool wm2200_readable_register(struct device *dev, unsigned int reg)
897 } 897 }
898} 898}
899 899
900static const struct reg_default wm2200_reva_patch[] = { 900static const struct reg_sequence wm2200_reva_patch[] = {
901 { 0x07, 0x0003 }, 901 { 0x07, 0x0003 },
902 { 0x102, 0x0200 }, 902 { 0x102, 0x0200 },
903 { 0x203, 0x0084 }, 903 { 0x203, 0x0084 },
@@ -2481,7 +2481,7 @@ static int wm2200_runtime_resume(struct device *dev)
2481} 2481}
2482#endif 2482#endif
2483 2483
2484static struct dev_pm_ops wm2200_pm = { 2484static const struct dev_pm_ops wm2200_pm = {
2485 SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume, 2485 SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume,
2486 NULL) 2486 NULL)
2487}; 2487};
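
Several hunks in this series (wm2200, wm5100, wm8962, wm8993, tlv320aic3x) change register patch tables from struct reg_default to struct reg_sequence. reg_default describes cache defaults with no ordering guarantee, while reg_sequence describes writes applied in array order, which is what regmap_register_patch() and regmap_multi_reg_write() expect. A minimal sketch with made-up register values, not taken from any of the drivers above:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Cache defaults: unordered, only consumed by the register cache. */
static const struct reg_default chip_reg_defaults[] = {
	{ 0x00, 0x009f },
	{ 0x01, 0x049f },
};

static const struct regmap_config chip_regmap = {
	.reg_bits = 8,
	.val_bits = 16,
	.max_register = 0x200,
	.reg_defaults = chip_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(chip_reg_defaults),
	.cache_type = REGCACHE_RBTREE,
};

/* Patch: replayed in array order after every cache sync. */
static const struct reg_sequence chip_reva_patch[] = {
	{ 0x07, 0x0003 },
	{ 0x102, 0x0200 },
};

static int chip_apply_patch(struct regmap *map)
{
	return regmap_register_patch(map, chip_reva_patch,
				     ARRAY_SIZE(chip_reva_patch));
}
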
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index b9594d6feb71..3695b1dcbaf7 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1247,7 +1247,7 @@ static const struct snd_soc_dapm_route wm5100_dapm_routes[] = {
1247 { "PWM2", NULL, "PWM2 Driver" }, 1247 { "PWM2", NULL, "PWM2 Driver" },
1248}; 1248};
1249 1249
1250static const struct reg_default wm5100_reva_patches[] = { 1250static const struct reg_sequence wm5100_reva_patches[] = {
1251 { WM5100_AUDIO_IF_1_10, 0 }, 1251 { WM5100_AUDIO_IF_1_10, 0 },
1252 { WM5100_AUDIO_IF_1_11, 1 }, 1252 { WM5100_AUDIO_IF_1_11, 1 },
1253 { WM5100_AUDIO_IF_1_12, 2 }, 1253 { WM5100_AUDIO_IF_1_12, 2 },
@@ -2708,7 +2708,7 @@ static int wm5100_runtime_resume(struct device *dev)
2708} 2708}
2709#endif 2709#endif
2710 2710
2711static struct dev_pm_ops wm5100_pm = { 2711static const struct dev_pm_ops wm5100_pm = {
2712 SET_RUNTIME_PM_OPS(wm5100_runtime_suspend, wm5100_runtime_resume, 2712 SET_RUNTIME_PM_OPS(wm5100_runtime_suspend, wm5100_runtime_resume,
2713 NULL) 2713 NULL)
2714}; 2714};
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index d097f09e50f2..64637d1cf4e5 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -788,8 +788,7 @@ ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
788ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE), 788ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
789ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE), 789ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
790 790
791SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19), 791ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
792SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
793SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT, 792SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
794 24, 0, eq_tlv), 793 24, 0, eq_tlv),
795SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT, 794SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -801,8 +800,7 @@ SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
801SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT, 800SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
802 24, 0, eq_tlv), 801 24, 0, eq_tlv),
803 802
804SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19), 803ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
805SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
806SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT, 804SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
807 24, 0, eq_tlv), 805 24, 0, eq_tlv),
808SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT, 806SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -814,8 +812,7 @@ SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
814SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT, 812SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
815 24, 0, eq_tlv), 813 24, 0, eq_tlv),
816 814
817SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19), 815ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
818SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
819SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT, 816SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
820 24, 0, eq_tlv), 817 24, 0, eq_tlv),
821SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT, 818SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -827,8 +824,7 @@ SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
827SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT, 824SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
828 24, 0, eq_tlv), 825 24, 0, eq_tlv),
829 826
830SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19), 827ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
831SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
832SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT, 828SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
833 24, 0, eq_tlv), 829 24, 0, eq_tlv),
834SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT, 830SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -851,10 +847,10 @@ ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
851ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE), 847ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
852ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE), 848ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
853 849
854SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1), 850ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
855SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1), 851ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
856SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1), 852ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
857SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1), 853ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
858 854
859ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE), 855ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE),
860ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE), 856ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE),
@@ -1883,7 +1879,7 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
1883 ret = snd_soc_add_codec_controls(codec, 1879 ret = snd_soc_add_codec_controls(codec,
1884 arizona_adsp2_rate_controls, 1); 1880 arizona_adsp2_rate_controls, 1);
1885 if (ret) 1881 if (ret)
1886 return ret; 1882 goto err_adsp2_codec_probe;
1887 1883
1888 arizona_init_spk(codec); 1884 arizona_init_spk(codec);
1889 arizona_init_gpio(codec); 1885 arizona_init_gpio(codec);
@@ -1893,6 +1889,11 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
1893 priv->core.arizona->dapm = dapm; 1889 priv->core.arizona->dapm = dapm;
1894 1890
1895 return 0; 1891 return 0;
1892
1893err_adsp2_codec_probe:
1894 wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
1895
1896 return ret;
1896} 1897}
1897 1898
1898static int wm5102_codec_remove(struct snd_soc_codec *codec) 1899static int wm5102_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 709fcc6169d8..2d1168c768d9 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -247,8 +247,7 @@ ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
247ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE), 247ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
248ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE), 248ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
249 249
250SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19), 250ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
251SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
252SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT, 251SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
253 24, 0, eq_tlv), 252 24, 0, eq_tlv),
254SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT, 253SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -260,8 +259,7 @@ SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
260SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT, 259SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
261 24, 0, eq_tlv), 260 24, 0, eq_tlv),
262 261
263SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19), 262ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
264SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
265SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT, 263SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
266 24, 0, eq_tlv), 264 24, 0, eq_tlv),
267SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT, 265SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -273,8 +271,7 @@ SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
273SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT, 271SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
274 24, 0, eq_tlv), 272 24, 0, eq_tlv),
275 273
276SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19), 274ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
277SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
278SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT, 275SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
279 24, 0, eq_tlv), 276 24, 0, eq_tlv),
280SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT, 277SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -286,8 +283,7 @@ SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
286SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT, 283SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
287 24, 0, eq_tlv), 284 24, 0, eq_tlv),
288 285
289SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19), 286ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
290SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
291SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT, 287SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
292 24, 0, eq_tlv), 288 24, 0, eq_tlv),
293SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT, 289SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -314,10 +310,10 @@ ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
314ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE), 310ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
315ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE), 311ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
316 312
317SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1), 313ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
318SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1), 314ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
319SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1), 315ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
320SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1), 316ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
321 317
322SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode), 318SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
323SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode), 319SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
@@ -1611,18 +1607,24 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
1611 for (i = 0; i < WM5110_NUM_ADSP; ++i) { 1607 for (i = 0; i < WM5110_NUM_ADSP; ++i) {
1612 ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec); 1608 ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
1613 if (ret) 1609 if (ret)
1614 return ret; 1610 goto err_adsp2_codec_probe;
1615 } 1611 }
1616 1612
1617 ret = snd_soc_add_codec_controls(codec, 1613 ret = snd_soc_add_codec_controls(codec,
1618 arizona_adsp2_rate_controls, 1614 arizona_adsp2_rate_controls,
1619 WM5110_NUM_ADSP); 1615 WM5110_NUM_ADSP);
1620 if (ret) 1616 if (ret)
1621 return ret; 1617 goto err_adsp2_codec_probe;
1622 1618
1623 snd_soc_dapm_disable_pin(dapm, "HAPTICS"); 1619 snd_soc_dapm_disable_pin(dapm, "HAPTICS");
1624 1620
1625 return 0; 1621 return 0;
1622
1623err_adsp2_codec_probe:
1624 for (--i; i >= 0; --i)
1625 wm_adsp2_codec_remove(&priv->core.adsp[i], codec);
1626
1627 return ret;
1626} 1628}
1627 1629
1628static int wm5110_codec_remove(struct snd_soc_codec *codec) 1630static int wm5110_codec_remove(struct snd_soc_codec *codec)
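
The wm5102 and wm5110 probe changes above replace early returns with a goto so that the ADSP instances which did probe are removed again on failure. The unwinding idiom, reduced to its essentials (the helper name and parameters are illustrative, only wm_adsp2_codec_probe/remove come from the patch):

/* Probe 'count' instances; on failure tear down, in reverse order, only
 * the ones that succeeded, then propagate the error. */
static int probe_all_adsp(struct wm_adsp *adsp, struct snd_soc_codec *codec,
			  int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = wm_adsp2_codec_probe(&adsp[i], codec);
		if (ret)
			goto err_unwind;
	}

	return 0;

err_unwind:
	for (--i; i >= 0; --i)
		wm_adsp2_codec_remove(&adsp[i], codec);

	return ret;
}
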
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 3cff5a699e57..b098a83a44d8 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -598,6 +598,7 @@ static const struct of_device_id wm8510_of_match[] = {
598 { .compatible = "wlf,wm8510" }, 598 { .compatible = "wlf,wm8510" },
599 { }, 599 { },
600}; 600};
601MODULE_DEVICE_TABLE(of, wm8510_of_match);
601 602
602static const struct regmap_config wm8510_regmap = { 603static const struct regmap_config wm8510_regmap = {
603 .reg_bits = 7, 604 .reg_bits = 7,
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 5f8fde56e68b..aa287a3965e7 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -430,6 +430,7 @@ static const struct of_device_id wm8523_of_match[] = {
430 { .compatible = "wlf,wm8523" }, 430 { .compatible = "wlf,wm8523" },
431 { }, 431 { },
432}; 432};
433MODULE_DEVICE_TABLE(of, wm8523_of_match);
433 434
434static const struct regmap_config wm8523_regmap = { 435static const struct regmap_config wm8523_regmap = {
435 .reg_bits = 8, 436 .reg_bits = 8,
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index abf60355f7f7..66602bf02f6e 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -916,6 +916,7 @@ static const struct of_device_id wm8580_of_match[] = {
916 { .compatible = "wlf,wm8580" }, 916 { .compatible = "wlf,wm8580" },
917 { }, 917 { },
918}; 918};
919MODULE_DEVICE_TABLE(of, wm8580_of_match);
919 920
920static const struct regmap_config wm8580_regmap = { 921static const struct regmap_config wm8580_regmap = {
921 .reg_bits = 7, 922 .reg_bits = 7,
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 40c4617e3ef1..5c01707d4999 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -113,7 +113,7 @@ WM8962_REGULATOR_EVENT(5)
113WM8962_REGULATOR_EVENT(6) 113WM8962_REGULATOR_EVENT(6)
114WM8962_REGULATOR_EVENT(7) 114WM8962_REGULATOR_EVENT(7)
115 115
116static struct reg_default wm8962_reg[] = { 116static const struct reg_default wm8962_reg[] = {
117 { 0, 0x009F }, /* R0 - Left Input volume */ 117 { 0, 0x009F }, /* R0 - Left Input volume */
118 { 1, 0x049F }, /* R1 - Right Input volume */ 118 { 1, 0x049F }, /* R1 - Right Input volume */
119 { 2, 0x0000 }, /* R2 - HPOUTL volume */ 119 { 2, 0x0000 }, /* R2 - HPOUTL volume */
@@ -3495,7 +3495,7 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8962 = {
3495}; 3495};
3496 3496
3497/* Improve power consumption for IN4 DC measurement mode */ 3497/* Improve power consumption for IN4 DC measurement mode */
3498static const struct reg_default wm8962_dc_measure[] = { 3498static const struct reg_sequence wm8962_dc_measure[] = {
3499 { 0xfd, 0x1 }, 3499 { 0xfd, 0x1 },
3500 { 0xcc, 0x40 }, 3500 { 0xcc, 0x40 },
3501 { 0xfd, 0 }, 3501 { 0xfd, 0 },
@@ -3859,7 +3859,7 @@ static int wm8962_runtime_suspend(struct device *dev)
3859} 3859}
3860#endif 3860#endif
3861 3861
3862static struct dev_pm_ops wm8962_pm = { 3862static const struct dev_pm_ops wm8962_pm = {
3863 SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL) 3863 SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
3864}; 3864};
3865 3865
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 3f4dfb95d2d7..ac9efd63dbef 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -41,7 +41,7 @@ static const char *wm8993_supply_names[WM8993_NUM_SUPPLIES] = {
41 "SPKVDD", 41 "SPKVDD",
42}; 42};
43 43
44static struct reg_default wm8993_reg_defaults[] = { 44static const struct reg_default wm8993_reg_defaults[] = {
45 { 1, 0x0000 }, /* R1 - Power Management (1) */ 45 { 1, 0x0000 }, /* R1 - Power Management (1) */
46 { 2, 0x6000 }, /* R2 - Power Management (2) */ 46 { 2, 0x6000 }, /* R2 - Power Management (2) */
47 { 3, 0x0000 }, /* R3 - Power Management (3) */ 47 { 3, 0x0000 }, /* R3 - Power Management (3) */
@@ -1595,7 +1595,7 @@ static int wm8993_resume(struct snd_soc_codec *codec)
1595#endif 1595#endif
1596 1596
1597/* Tune DC servo configuration */ 1597/* Tune DC servo configuration */
1598static struct reg_default wm8993_regmap_patch[] = { 1598static const struct reg_sequence wm8993_regmap_patch[] = {
1599 { 0x44, 3 }, 1599 { 0x44, 3 },
1600 { 0x56, 3 }, 1600 { 0x56, 3 },
1601 { 0x44, 0 }, 1601 { 0x44, 0 },
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 962e1d31a629..2ccbb322df77 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1942,14 +1942,16 @@ static const struct snd_soc_dapm_route intercon[] = {
1942 { "AIF2ADCDAT", NULL, "AIF2ADC Mux" }, 1942 { "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
1943 1943
1944 /* AIF3 output */ 1944 /* AIF3 output */
1945 { "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1L" }, 1945 { "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1L" },
1946 { "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1R" }, 1946 { "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1R" },
1947 { "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2L" }, 1947 { "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2L" },
1948 { "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2R" }, 1948 { "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2R" },
1949 { "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCL" }, 1949 { "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
1950 { "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCR" }, 1950 { "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
1951 { "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" }, 1951 { "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACL" },
1952 { "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" }, 1952 { "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACR" },
1953
1954 { "AIF3ADCDAT", NULL, "AIF3ADC Mux" },
1953 1955
1954 /* Loopback */ 1956 /* Loopback */
1955 { "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" }, 1957 { "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 29b6688fb6ee..66c09556da79 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -117,7 +117,7 @@ WM8996_REGULATOR_EVENT(0)
117WM8996_REGULATOR_EVENT(1) 117WM8996_REGULATOR_EVENT(1)
118WM8996_REGULATOR_EVENT(2) 118WM8996_REGULATOR_EVENT(2)
119 119
120static struct reg_default wm8996_reg[] = { 120static const struct reg_default wm8996_reg[] = {
121 { WM8996_POWER_MANAGEMENT_1, 0x0 }, 121 { WM8996_POWER_MANAGEMENT_1, 0x0 },
122 { WM8996_POWER_MANAGEMENT_2, 0x0 }, 122 { WM8996_POWER_MANAGEMENT_2, 0x0 },
123 { WM8996_POWER_MANAGEMENT_3, 0x0 }, 123 { WM8996_POWER_MANAGEMENT_3, 0x0 },
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 4134dc7e1243..b4dba3a02aba 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -174,8 +174,7 @@ ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
174ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE), 174ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
175ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE), 175ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
176 176
177SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19), 177ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
178SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
179SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT, 178SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
180 24, 0, eq_tlv), 179 24, 0, eq_tlv),
181SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT, 180SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -187,8 +186,7 @@ SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
187SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT, 186SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
188 24, 0, eq_tlv), 187 24, 0, eq_tlv),
189 188
190SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19), 189ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
191SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
192SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT, 190SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
193 24, 0, eq_tlv), 191 24, 0, eq_tlv),
194SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT, 192SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -200,8 +198,7 @@ SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
200SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT, 198SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
201 24, 0, eq_tlv), 199 24, 0, eq_tlv),
202 200
203SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19), 201ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
204SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
205SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT, 202SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
206 24, 0, eq_tlv), 203 24, 0, eq_tlv),
207SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT, 204SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -213,8 +210,7 @@ SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
213SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT, 210SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
214 24, 0, eq_tlv), 211 24, 0, eq_tlv),
215 212
216SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19), 213ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
217SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
218SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT, 214SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
219 24, 0, eq_tlv), 215 24, 0, eq_tlv),
220SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT, 216SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -242,10 +238,10 @@ SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
242SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode), 238SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
243SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode), 239SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
244 240
245SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1), 241ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
246SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1), 242ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
247SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1), 243ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
248SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1), 244ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
249 245
250SOC_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]), 246SOC_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
251SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]), 247SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 011516eec4b5..45223a20331f 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -30,7 +30,7 @@
30#include <sound/wm9081.h> 30#include <sound/wm9081.h>
31#include "wm9081.h" 31#include "wm9081.h"
32 32
33static struct reg_default wm9081_reg[] = { 33static const struct reg_default wm9081_reg[] = {
34 { 2, 0x00B9 }, /* R2 - Analogue Lineout */ 34 { 2, 0x00B9 }, /* R2 - Analogue Lineout */
35 { 3, 0x00B9 }, /* R3 - Analogue Speaker PGA */ 35 { 3, 0x00B9 }, /* R3 - Analogue Speaker PGA */
36 { 4, 0x0001 }, /* R4 - VMID Control */ 36 { 4, 0x0001 }, /* R4 - VMID Control */
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 5cc457ef8894..744842c76a60 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -22,6 +22,9 @@
22 22
23#include "wm9705.h" 23#include "wm9705.h"
24 24
25#define WM9705_VENDOR_ID 0x574d4c05
26#define WM9705_VENDOR_ID_MASK 0xffffffff
27
25/* 28/*
26 * WM9705 register cache 29 * WM9705 register cache
27 */ 30 */
@@ -293,21 +296,6 @@ static struct snd_soc_dai_driver wm9705_dai[] = {
293 } 296 }
294}; 297};
295 298
296static int wm9705_reset(struct snd_soc_codec *codec)
297{
298 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
299
300 if (soc_ac97_ops->reset) {
301 soc_ac97_ops->reset(ac97);
302 if (ac97_read(codec, 0) == wm9705_reg[0])
303 return 0; /* Success */
304 }
305
306 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
307
308 return -EIO;
309}
310
311#ifdef CONFIG_PM 299#ifdef CONFIG_PM
312static int wm9705_soc_suspend(struct snd_soc_codec *codec) 300static int wm9705_soc_suspend(struct snd_soc_codec *codec)
313{ 301{
@@ -324,7 +312,8 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
324 int i, ret; 312 int i, ret;
325 u16 *cache = codec->reg_cache; 313 u16 *cache = codec->reg_cache;
326 314
327 ret = wm9705_reset(codec); 315 ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
316 WM9705_VENDOR_ID_MASK);
328 if (ret < 0) 317 if (ret < 0)
329 return ret; 318 return ret;
330 319
@@ -342,30 +331,17 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
342static int wm9705_soc_probe(struct snd_soc_codec *codec) 331static int wm9705_soc_probe(struct snd_soc_codec *codec)
343{ 332{
344 struct snd_ac97 *ac97; 333 struct snd_ac97 *ac97;
345 int ret = 0;
346 334
347 ac97 = snd_soc_alloc_ac97_codec(codec); 335 ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
336 WM9705_VENDOR_ID_MASK);
348 if (IS_ERR(ac97)) { 337 if (IS_ERR(ac97)) {
349 ret = PTR_ERR(ac97);
350 dev_err(codec->dev, "Failed to register AC97 codec\n"); 338 dev_err(codec->dev, "Failed to register AC97 codec\n");
351 return ret; 339 return PTR_ERR(ac97);
352 } 340 }
353 341
354 ret = wm9705_reset(codec);
355 if (ret)
356 goto err_put_device;
357
358 ret = device_add(&ac97->dev);
359 if (ret)
360 goto err_put_device;
361
362 snd_soc_codec_set_drvdata(codec, ac97); 342 snd_soc_codec_set_drvdata(codec, ac97);
363 343
364 return 0; 344 return 0;
365
366err_put_device:
367 put_device(&ac97->dev);
368 return ret;
369} 345}
370 346
371static int wm9705_soc_remove(struct snd_soc_codec *codec) 347static int wm9705_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 1fda104dfc45..488a92224249 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -23,6 +23,9 @@
23#include <sound/tlv.h> 23#include <sound/tlv.h>
24#include "wm9712.h" 24#include "wm9712.h"
25 25
26#define WM9712_VENDOR_ID 0x574d4c12
27#define WM9712_VENDOR_ID_MASK 0xffffffff
28
26struct wm9712_priv { 29struct wm9712_priv {
27 struct snd_ac97 *ac97; 30 struct snd_ac97 *ac97;
28 unsigned int hp_mixer[2]; 31 unsigned int hp_mixer[2];
@@ -613,35 +616,14 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
613 return 0; 616 return 0;
614} 617}
615 618
616static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
617{
618 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
619
620 if (try_warm && soc_ac97_ops->warm_reset) {
621 soc_ac97_ops->warm_reset(wm9712->ac97);
622 if (ac97_read(codec, 0) == wm9712_reg[0])
623 return 1;
624 }
625
626 soc_ac97_ops->reset(wm9712->ac97);
627 if (soc_ac97_ops->warm_reset)
628 soc_ac97_ops->warm_reset(wm9712->ac97);
629 if (ac97_read(codec, 0) != wm9712_reg[0])
630 goto err;
631 return 0;
632
633err:
634 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
635 return -EIO;
636}
637
638static int wm9712_soc_resume(struct snd_soc_codec *codec) 619static int wm9712_soc_resume(struct snd_soc_codec *codec)
639{ 620{
640 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); 621 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
641 int i, ret; 622 int i, ret;
642 u16 *cache = codec->reg_cache; 623 u16 *cache = codec->reg_cache;
643 624
644 ret = wm9712_reset(codec, 1); 625 ret = snd_ac97_reset(wm9712->ac97, true, WM9712_VENDOR_ID,
626 WM9712_VENDOR_ID_MASK);
645 if (ret < 0) 627 if (ret < 0)
646 return ret; 628 return ret;
647 629
@@ -663,31 +645,20 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
663static int wm9712_soc_probe(struct snd_soc_codec *codec) 645static int wm9712_soc_probe(struct snd_soc_codec *codec)
664{ 646{
665 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec); 647 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
666 int ret = 0; 648 int ret;
667 649
668 wm9712->ac97 = snd_soc_alloc_ac97_codec(codec); 650 wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
651 WM9712_VENDOR_ID_MASK);
669 if (IS_ERR(wm9712->ac97)) { 652 if (IS_ERR(wm9712->ac97)) {
670 ret = PTR_ERR(wm9712->ac97); 653 ret = PTR_ERR(wm9712->ac97);
671 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret); 654 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
672 return ret; 655 return ret;
673 } 656 }
674 657
675 ret = wm9712_reset(codec, 0);
676 if (ret < 0)
677 goto err_put_device;
678
679 ret = device_add(&wm9712->ac97->dev);
680 if (ret)
681 goto err_put_device;
682
683 /* set alc mux to none */ 658 /* set alc mux to none */
684 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); 659 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
685 660
686 return 0; 661 return 0;
687
688err_put_device:
689 put_device(&wm9712->ac97->dev);
690 return ret;
691} 662}
692 663
693static int wm9712_soc_remove(struct snd_soc_codec *codec) 664static int wm9712_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 89cd2d6f57c0..955e6511af56 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -29,6 +29,9 @@
29 29
30#include "wm9713.h" 30#include "wm9713.h"
31 31
32#define WM9713_VENDOR_ID 0x574d4c13
33#define WM9713_VENDOR_ID_MASK 0xffffffff
34
32struct wm9713_priv { 35struct wm9713_priv {
33 struct snd_ac97 *ac97; 36 struct snd_ac97 *ac97;
34 u32 pll_in; /* PLL input frequency */ 37 u32 pll_in; /* PLL input frequency */
@@ -1123,28 +1126,6 @@ static struct snd_soc_dai_driver wm9713_dai[] = {
1123 }, 1126 },
1124}; 1127};
1125 1128
1126int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
1127{
1128 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1129
1130 if (try_warm && soc_ac97_ops->warm_reset) {
1131 soc_ac97_ops->warm_reset(wm9713->ac97);
1132 if (ac97_read(codec, 0) == wm9713_reg[0])
1133 return 1;
1134 }
1135
1136 soc_ac97_ops->reset(wm9713->ac97);
1137 if (soc_ac97_ops->warm_reset)
1138 soc_ac97_ops->warm_reset(wm9713->ac97);
1139 if (ac97_read(codec, 0) != wm9713_reg[0]) {
1140 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
1141 return -EIO;
1142 }
1143
1144 return 0;
1145}
1146EXPORT_SYMBOL_GPL(wm9713_reset);
1147
1148static int wm9713_set_bias_level(struct snd_soc_codec *codec, 1129static int wm9713_set_bias_level(struct snd_soc_codec *codec,
1149 enum snd_soc_bias_level level) 1130 enum snd_soc_bias_level level)
1150{ 1131{
@@ -1196,7 +1177,8 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
1196 int i, ret; 1177 int i, ret;
1197 u16 *cache = codec->reg_cache; 1178 u16 *cache = codec->reg_cache;
1198 1179
1199 ret = wm9713_reset(codec, 1); 1180 ret = snd_ac97_reset(wm9713->ac97, true, WM9713_VENDOR_ID,
1181 WM9713_VENDOR_ID_MASK);
1200 if (ret < 0) 1182 if (ret < 0)
1201 return ret; 1183 return ret;
1202 1184
@@ -1222,32 +1204,18 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
1222static int wm9713_soc_probe(struct snd_soc_codec *codec) 1204static int wm9713_soc_probe(struct snd_soc_codec *codec)
1223{ 1205{
1224 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); 1206 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1225 int ret = 0, reg; 1207 int reg;
1226 1208
1227 wm9713->ac97 = snd_soc_alloc_ac97_codec(codec); 1209 wm9713->ac97 = snd_soc_new_ac97_codec(codec, WM9713_VENDOR_ID,
1210 WM9713_VENDOR_ID_MASK);
1228 if (IS_ERR(wm9713->ac97)) 1211 if (IS_ERR(wm9713->ac97))
1229 return PTR_ERR(wm9713->ac97); 1212 return PTR_ERR(wm9713->ac97);
1230 1213
1231 /* do a cold reset for the controller and then try
1232 * a warm reset followed by an optional cold reset for codec */
1233 wm9713_reset(codec, 0);
1234 ret = wm9713_reset(codec, 1);
1235 if (ret < 0)
1236 goto err_put_device;
1237
1238 ret = device_add(&wm9713->ac97->dev);
1239 if (ret)
1240 goto err_put_device;
1241
1242 /* unmute the adc - move to kcontrol */ 1214 /* unmute the adc - move to kcontrol */
1243 reg = ac97_read(codec, AC97_CD) & 0x7fff; 1215 reg = ac97_read(codec, AC97_CD) & 0x7fff;
1244 ac97_write(codec, AC97_CD, reg); 1216 ac97_write(codec, AC97_CD, reg);
1245 1217
1246 return 0; 1218 return 0;
1247
1248err_put_device:
1249 put_device(&wm9713->ac97->dev);
1250 return ret;
1251} 1219}
1252 1220
1253static int wm9713_soc_remove(struct snd_soc_codec *codec) 1221static int wm9713_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.h b/sound/soc/codecs/wm9713.h
index 793da863a03d..53df11b1f727 100644
--- a/sound/soc/codecs/wm9713.h
+++ b/sound/soc/codecs/wm9713.h
@@ -45,6 +45,4 @@
45#define WM9713_DAI_AC97_AUX 1 45#define WM9713_DAI_AC97_AUX 1
46#define WM9713_DAI_PCM_VOICE 2 46#define WM9713_DAI_PCM_VOICE 2
47 47
48int wm9713_reset(struct snd_soc_codec *codec, int try_warm);
49
50#endif 48#endif
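
The wm9705/wm9712/wm9713 hunks drop the per-driver reset helpers: snd_soc_new_ac97_codec() now takes the expected vendor ID and mask, so the ASoC core performs the reset and verifies the part, and resume uses snd_ac97_reset() with the same ID. A probe for a hypothetical AC'97 codec (made-up ID, names assumed) therefore shrinks to roughly:

#define MYCODEC_VENDOR_ID	0x574d4c99	/* illustrative vendor ID */
#define MYCODEC_VENDOR_ID_MASK	0xffffffff

static int mycodec_soc_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;

	/* Allocates, resets, verifies and registers the AC'97 device. */
	ac97 = snd_soc_new_ac97_codec(codec, MYCODEC_VENDOR_ID,
				      MYCODEC_VENDOR_ID_MASK);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	snd_soc_codec_set_drvdata(codec, ac97);

	return 0;
}
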
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 56cb4d95637d..ec98548a5fc9 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -651,23 +651,15 @@ static const struct snd_soc_component_driver davinci_i2s_component = {
651static int davinci_i2s_probe(struct platform_device *pdev) 651static int davinci_i2s_probe(struct platform_device *pdev)
652{ 652{
653 struct davinci_mcbsp_dev *dev; 653 struct davinci_mcbsp_dev *dev;
654 struct resource *mem, *ioarea, *res; 654 struct resource *mem, *res;
655 void __iomem *io_base;
655 int *dma; 656 int *dma;
656 int ret; 657 int ret;
657 658
658 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 659 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
659 if (!mem) { 660 io_base = devm_ioremap_resource(&pdev->dev, mem);
660 dev_err(&pdev->dev, "no mem resource?\n"); 661 if (IS_ERR(io_base))
661 return -ENODEV; 662 return PTR_ERR(io_base);
662 }
663
664 ioarea = devm_request_mem_region(&pdev->dev, mem->start,
665 resource_size(mem),
666 pdev->name);
667 if (!ioarea) {
668 dev_err(&pdev->dev, "McBSP region already claimed\n");
669 return -EBUSY;
670 }
671 663
672 dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev), 664 dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev),
673 GFP_KERNEL); 665 GFP_KERNEL);
@@ -679,12 +671,7 @@ static int davinci_i2s_probe(struct platform_device *pdev)
679 return -ENODEV; 671 return -ENODEV;
680 clk_enable(dev->clk); 672 clk_enable(dev->clk);
681 673
682 dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); 674 dev->base = io_base;
683 if (!dev->base) {
684 dev_err(&pdev->dev, "ioremap failed\n");
685 ret = -ENOMEM;
686 goto err_release_clk;
687 }
688 675
689 dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr = 676 dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
690 (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG); 677 (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b960e626dad9..add6bb99661d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1613,7 +1613,7 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
1613static int davinci_mcasp_probe(struct platform_device *pdev) 1613static int davinci_mcasp_probe(struct platform_device *pdev)
1614{ 1614{
1615 struct snd_dmaengine_dai_dma_data *dma_data; 1615 struct snd_dmaengine_dai_dma_data *dma_data;
1616 struct resource *mem, *ioarea, *res, *dat; 1616 struct resource *mem, *res, *dat;
1617 struct davinci_mcasp_pdata *pdata; 1617 struct davinci_mcasp_pdata *pdata;
1618 struct davinci_mcasp *mcasp; 1618 struct davinci_mcasp *mcasp;
1619 char *irq_name; 1619 char *irq_name;
@@ -1648,22 +1648,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1648 } 1648 }
1649 } 1649 }
1650 1650
1651 ioarea = devm_request_mem_region(&pdev->dev, mem->start, 1651 mcasp->base = devm_ioremap_resource(&pdev->dev, mem);
1652 resource_size(mem), pdev->name); 1652 if (IS_ERR(mcasp->base))
1653 if (!ioarea) { 1653 return PTR_ERR(mcasp->base);
1654 dev_err(&pdev->dev, "Audio region already claimed\n");
1655 return -EBUSY;
1656 }
1657 1654
1658 pm_runtime_enable(&pdev->dev); 1655 pm_runtime_enable(&pdev->dev);
1659 1656
1660 mcasp->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
1661 if (!mcasp->base) {
1662 dev_err(&pdev->dev, "ioremap failed\n");
1663 ret = -ENOMEM;
1664 goto err;
1665 }
1666
1667 mcasp->op_mode = pdata->op_mode; 1657 mcasp->op_mode = pdata->op_mode;
1668 /* sanity check for tdm slots parameter */ 1658 /* sanity check for tdm slots parameter */
1669 if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) { 1659 if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
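
Both DaVinci hunks replace the open-coded devm_request_mem_region() plus devm_ioremap() pair with devm_ioremap_resource(), which claims and maps the region in one call, prints its own error message, and copes with a NULL resource. The resulting probe boilerplate, with placeholder driver names, looks like this:

static int mydrv_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* also covers mem == NULL */

	/* ... use 'base', and mem->start for DMA addresses ... */

	return 0;
}
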
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index fabd05f24aeb..c77d9218795a 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -231,8 +231,9 @@ static int davinci_vcif_probe(struct platform_device *pdev)
231 231
232 dev_set_drvdata(&pdev->dev, davinci_vcif_dev); 232 dev_set_drvdata(&pdev->dev, davinci_vcif_dev);
233 233
234 ret = snd_soc_register_component(&pdev->dev, &davinci_vcif_component, 234 ret = devm_snd_soc_register_component(&pdev->dev,
235 &davinci_vcif_dai, 1); 235 &davinci_vcif_component,
236 &davinci_vcif_dai, 1);
236 if (ret != 0) { 237 if (ret != 0) {
237 dev_err(&pdev->dev, "could not register dai\n"); 238 dev_err(&pdev->dev, "could not register dai\n");
238 return ret; 239 return ret;
@@ -241,23 +242,14 @@ static int davinci_vcif_probe(struct platform_device *pdev)
241 ret = edma_pcm_platform_register(&pdev->dev); 242 ret = edma_pcm_platform_register(&pdev->dev);
242 if (ret) { 243 if (ret) {
243 dev_err(&pdev->dev, "register PCM failed: %d\n", ret); 244 dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
244 snd_soc_unregister_component(&pdev->dev);
245 return ret; 245 return ret;
246 } 246 }
247 247
248 return 0; 248 return 0;
249} 249}
250 250
251static int davinci_vcif_remove(struct platform_device *pdev)
252{
253 snd_soc_unregister_component(&pdev->dev);
254
255 return 0;
256}
257
258static struct platform_driver davinci_vcif_driver = { 251static struct platform_driver davinci_vcif_driver = {
259 .probe = davinci_vcif_probe, 252 .probe = davinci_vcif_probe,
260 .remove = davinci_vcif_remove,
261 .driver = { 253 .driver = {
262 .name = "davinci-vcif", 254 .name = "davinci-vcif",
263 }, 255 },
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index e1aa3834b101..883087f2b092 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -182,7 +182,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
182 ); 182 );
183 } else { 183 } else {
184 if (np) { 184 if (np) {
185 /* The eukrea,asoc-tlv320 driver was explicitely 185 /* The eukrea,asoc-tlv320 driver was explicitly
186 * requested (through the device tree). 186 * requested (through the device tree).
187 */ 187 */
188 dev_err(&pdev->dev, 188 dev_err(&pdev->dev,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index de438871040b..5aeb6ed4827e 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -23,6 +23,7 @@
23 23
24#include "../codecs/sgtl5000.h" 24#include "../codecs/sgtl5000.h"
25#include "../codecs/wm8962.h" 25#include "../codecs/wm8962.h"
26#include "../codecs/wm8960.h"
26 27
27#define RX 0 28#define RX 0
28#define TX 1 29#define TX 1
@@ -407,6 +408,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
407 struct fsl_asoc_card_priv *priv; 408 struct fsl_asoc_card_priv *priv;
408 struct i2c_client *codec_dev; 409 struct i2c_client *codec_dev;
409 struct clk *codec_clk; 410 struct clk *codec_clk;
411 const char *codec_dai_name;
410 u32 width; 412 u32 width;
411 int ret; 413 int ret;
412 414
@@ -459,6 +461,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
459 461
460 /* Diversify the card configurations */ 462 /* Diversify the card configurations */
461 if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) { 463 if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
464 codec_dai_name = "cs42888";
462 priv->card.set_bias_level = NULL; 465 priv->card.set_bias_level = NULL;
463 priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq; 466 priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
464 priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq; 467 priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
@@ -467,14 +470,22 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
467 priv->cpu_priv.slot_width = 32; 470 priv->cpu_priv.slot_width = 32;
468 priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; 471 priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
469 } else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) { 472 } else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
473 codec_dai_name = "sgtl5000";
470 priv->codec_priv.mclk_id = SGTL5000_SYSCLK; 474 priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
471 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; 475 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
472 } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) { 476 } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
477 codec_dai_name = "wm8962";
473 priv->card.set_bias_level = fsl_asoc_card_set_bias_level; 478 priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
474 priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK; 479 priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
475 priv->codec_priv.fll_id = WM8962_SYSCLK_FLL; 480 priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
476 priv->codec_priv.pll_id = WM8962_FLL; 481 priv->codec_priv.pll_id = WM8962_FLL;
477 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; 482 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
483 } else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) {
484 codec_dai_name = "wm8960-hifi";
485 priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
486 priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO;
487 priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO;
488 priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
478 } else { 489 } else {
479 dev_err(&pdev->dev, "unknown Device Tree compatible\n"); 490 dev_err(&pdev->dev, "unknown Device Tree compatible\n");
480 return -EINVAL; 491 return -EINVAL;
@@ -521,7 +532,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
521 /* Normal DAI Link */ 532 /* Normal DAI Link */
522 priv->dai_link[0].cpu_of_node = cpu_np; 533 priv->dai_link[0].cpu_of_node = cpu_np;
523 priv->dai_link[0].codec_of_node = codec_np; 534 priv->dai_link[0].codec_of_node = codec_np;
524 priv->dai_link[0].codec_dai_name = codec_dev->name; 535 priv->dai_link[0].codec_dai_name = codec_dai_name;
525 priv->dai_link[0].platform_of_node = cpu_np; 536 priv->dai_link[0].platform_of_node = cpu_np;
526 priv->dai_link[0].dai_fmt = priv->dai_fmt; 537 priv->dai_link[0].dai_fmt = priv->dai_fmt;
527 priv->card.num_links = 1; 538 priv->card.num_links = 1;
@@ -530,7 +541,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
530 /* DPCM DAI Links only if ASRC exsits */ 541 /* DPCM DAI Links only if ASRC exsits */
531 priv->dai_link[1].cpu_of_node = asrc_np; 542 priv->dai_link[1].cpu_of_node = asrc_np;
532 priv->dai_link[1].platform_of_node = asrc_np; 543 priv->dai_link[1].platform_of_node = asrc_np;
533 priv->dai_link[2].codec_dai_name = codec_dev->name; 544 priv->dai_link[2].codec_dai_name = codec_dai_name;
534 priv->dai_link[2].codec_of_node = codec_np; 545 priv->dai_link[2].codec_of_node = codec_np;
535 priv->dai_link[2].cpu_of_node = cpu_np; 546 priv->dai_link[2].cpu_of_node = cpu_np;
536 priv->dai_link[2].dai_fmt = priv->dai_fmt; 547 priv->dai_link[2].dai_fmt = priv->dai_fmt;
@@ -578,6 +589,7 @@ static const struct of_device_id fsl_asoc_card_dt_ids[] = {
578 { .compatible = "fsl,imx-audio-cs42888", }, 589 { .compatible = "fsl,imx-audio-cs42888", },
579 { .compatible = "fsl,imx-audio-sgtl5000", }, 590 { .compatible = "fsl,imx-audio-sgtl5000", },
580 { .compatible = "fsl,imx-audio-wm8962", }, 591 { .compatible = "fsl,imx-audio-wm8962", },
592 { .compatible = "fsl,imx-audio-wm8960", },
581 {} 593 {}
582}; 594};
583 595
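
The fsl-asoc-card hunks above stop using codec_dev->name (the I2C device name) as the DAI-link codec DAI name and instead pick a fixed DAI name per Device Tree compatible. A minimal sketch of that dispatch, using only the compatibles and DAI names visible in the hunks (the helper itself is hypothetical), could look like:

#include <linux/of.h>
#include <linux/errno.h>

/* Sketch: map a card compatible string to the DAI name the codec driver
 * actually registers, instead of reusing the I2C device name.
 */
static int pick_codec_dai_name(struct device_node *np, const char **name)
{
	if (of_device_is_compatible(np, "fsl,imx-audio-cs42888"))
		*name = "cs42888";
	else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000"))
		*name = "sgtl5000";
	else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962"))
		*name = "wm8962";
	else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960"))
		*name = "wm8960-hifi";
	else
		return -EINVAL;	/* unknown Device Tree compatible */

	return 0;
}
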
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index c068494bae30..9f087d4f73ed 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -931,14 +931,29 @@ static int fsl_asrc_probe(struct platform_device *pdev)
931static int fsl_asrc_runtime_resume(struct device *dev) 931static int fsl_asrc_runtime_resume(struct device *dev)
932{ 932{
933 struct fsl_asrc *asrc_priv = dev_get_drvdata(dev); 933 struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
934 int i; 934 int i, ret;
935 935
936 clk_prepare_enable(asrc_priv->mem_clk); 936 ret = clk_prepare_enable(asrc_priv->mem_clk);
937 clk_prepare_enable(asrc_priv->ipg_clk); 937 if (ret)
938 for (i = 0; i < ASRC_CLK_MAX_NUM; i++) 938 return ret;
939 clk_prepare_enable(asrc_priv->asrck_clk[i]); 939 ret = clk_prepare_enable(asrc_priv->ipg_clk);
940 if (ret)
941 goto disable_mem_clk;
942 for (i = 0; i < ASRC_CLK_MAX_NUM; i++) {
943 ret = clk_prepare_enable(asrc_priv->asrck_clk[i]);
944 if (ret)
945 goto disable_asrck_clk;
946 }
940 947
941 return 0; 948 return 0;
949
950disable_asrck_clk:
951 for (i--; i >= 0; i--)
952 clk_disable_unprepare(asrc_priv->asrck_clk[i]);
953 clk_disable_unprepare(asrc_priv->ipg_clk);
954disable_mem_clk:
955 clk_disable_unprepare(asrc_priv->mem_clk);
956 return ret;
942} 957}
943 958
944static int fsl_asrc_runtime_suspend(struct device *dev) 959static int fsl_asrc_runtime_suspend(struct device *dev)
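
The fsl_asrc runtime-resume hunk above adds error handling to a series of clk_prepare_enable() calls and unwinds the already-enabled clocks in reverse order on failure. A self-contained sketch of that pattern, assuming only an array of clocks, might be:

#include <linux/clk.h>

/* Sketch: enable N clocks; if one fails, disable the ones that were
 * already enabled, walking back down the array.
 */
static int enable_clocks(struct clk **clks, int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);
	return ret;
}
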
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 5c7597191e3f..8c2ddc1ea954 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -839,7 +839,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
839 return ret; 839 return ret;
840 } 840 }
841 841
842 ret = imx_pcm_dma_init(pdev); 842 ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
843 if (ret) 843 if (ret)
844 dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret); 844 dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
845 845
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 5c73bea7b11e..a18fd92c4a85 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -791,7 +791,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
791 return ret; 791 return ret;
792 792
793 if (sai->sai_on_imx) 793 if (sai->sai_on_imx)
794 return imx_pcm_dma_init(pdev); 794 return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
795 else 795 else
796 return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); 796 return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
797} 797}
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 066280953c85..b95fbc3f68eb 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -13,7 +13,8 @@
13 13
14#define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ 14#define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
15 SNDRV_PCM_FMTBIT_S20_3LE |\ 15 SNDRV_PCM_FMTBIT_S20_3LE |\
16 SNDRV_PCM_FMTBIT_S24_LE) 16 SNDRV_PCM_FMTBIT_S24_LE |\
17 SNDRV_PCM_FMTBIT_S32_LE)
17 18
18/* SAI Register Map Register */ 19/* SAI Register Map Register */
19#define FSL_SAI_TCSR 0x00 /* SAI Transmit Control */ 20#define FSL_SAI_TCSR 0x00 /* SAI Transmit Control */
@@ -45,7 +46,7 @@
45#define FSL_SAI_xFR(tx) (tx ? FSL_SAI_TFR : FSL_SAI_RFR) 46#define FSL_SAI_xFR(tx) (tx ? FSL_SAI_TFR : FSL_SAI_RFR)
46#define FSL_SAI_xMR(tx) (tx ? FSL_SAI_TMR : FSL_SAI_RMR) 47#define FSL_SAI_xMR(tx) (tx ? FSL_SAI_TMR : FSL_SAI_RMR)
47 48
48/* SAI Transmit/Recieve Control Register */ 49/* SAI Transmit/Receive Control Register */
49#define FSL_SAI_CSR_TERE BIT(31) 50#define FSL_SAI_CSR_TERE BIT(31)
50#define FSL_SAI_CSR_FR BIT(25) 51#define FSL_SAI_CSR_FR BIT(25)
51#define FSL_SAI_CSR_SR BIT(24) 52#define FSL_SAI_CSR_SR BIT(24)
@@ -67,10 +68,10 @@
67#define FSL_SAI_CSR_FRIE BIT(8) 68#define FSL_SAI_CSR_FRIE BIT(8)
68#define FSL_SAI_CSR_FRDE BIT(0) 69#define FSL_SAI_CSR_FRDE BIT(0)
69 70
70/* SAI Transmit and Recieve Configuration 1 Register */ 71/* SAI Transmit and Receive Configuration 1 Register */
71#define FSL_SAI_CR1_RFW_MASK 0x1f 72#define FSL_SAI_CR1_RFW_MASK 0x1f
72 73
73/* SAI Transmit and Recieve Configuration 2 Register */ 74/* SAI Transmit and Receive Configuration 2 Register */
74#define FSL_SAI_CR2_SYNC BIT(30) 75#define FSL_SAI_CR2_SYNC BIT(30)
75#define FSL_SAI_CR2_MSEL_MASK (0x3 << 26) 76#define FSL_SAI_CR2_MSEL_MASK (0x3 << 26)
76#define FSL_SAI_CR2_MSEL_BUS 0 77#define FSL_SAI_CR2_MSEL_BUS 0
@@ -82,12 +83,12 @@
82#define FSL_SAI_CR2_BCD_MSTR BIT(24) 83#define FSL_SAI_CR2_BCD_MSTR BIT(24)
83#define FSL_SAI_CR2_DIV_MASK 0xff 84#define FSL_SAI_CR2_DIV_MASK 0xff
84 85
85/* SAI Transmit and Recieve Configuration 3 Register */ 86/* SAI Transmit and Receive Configuration 3 Register */
86#define FSL_SAI_CR3_TRCE BIT(16) 87#define FSL_SAI_CR3_TRCE BIT(16)
87#define FSL_SAI_CR3_WDFL(x) (x) 88#define FSL_SAI_CR3_WDFL(x) (x)
88#define FSL_SAI_CR3_WDFL_MASK 0x1f 89#define FSL_SAI_CR3_WDFL_MASK 0x1f
89 90
90/* SAI Transmit and Recieve Configuration 4 Register */ 91/* SAI Transmit and Receive Configuration 4 Register */
91#define FSL_SAI_CR4_FRSZ(x) (((x) - 1) << 16) 92#define FSL_SAI_CR4_FRSZ(x) (((x) - 1) << 16)
92#define FSL_SAI_CR4_FRSZ_MASK (0x1f << 16) 93#define FSL_SAI_CR4_FRSZ_MASK (0x1f << 16)
93#define FSL_SAI_CR4_SYWD(x) (((x) - 1) << 8) 94#define FSL_SAI_CR4_SYWD(x) (((x) - 1) << 8)
@@ -97,7 +98,7 @@
97#define FSL_SAI_CR4_FSP BIT(1) 98#define FSL_SAI_CR4_FSP BIT(1)
98#define FSL_SAI_CR4_FSD_MSTR BIT(0) 99#define FSL_SAI_CR4_FSD_MSTR BIT(0)
99 100
100/* SAI Transmit and Recieve Configuration 5 Register */ 101/* SAI Transmit and Receive Configuration 5 Register */
101#define FSL_SAI_CR5_WNW(x) (((x) - 1) << 24) 102#define FSL_SAI_CR5_WNW(x) (((x) - 1) << 24)
102#define FSL_SAI_CR5_WNW_MASK (0x1f << 24) 103#define FSL_SAI_CR5_WNW_MASK (0x1f << 24)
103#define FSL_SAI_CR5_W0W(x) (((x) - 1) << 16) 104#define FSL_SAI_CR5_W0W(x) (((x) - 1) << 16)
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 8e932219cb3a..ab729f2426fe 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -454,7 +454,8 @@ static int fsl_spdif_startup(struct snd_pcm_substream *substream,
454 struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai); 454 struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
455 struct platform_device *pdev = spdif_priv->pdev; 455 struct platform_device *pdev = spdif_priv->pdev;
456 struct regmap *regmap = spdif_priv->regmap; 456 struct regmap *regmap = spdif_priv->regmap;
457 u32 scr, mask, i; 457 u32 scr, mask;
458 int i;
458 int ret; 459 int ret;
459 460
460 /* Reset module and interrupts only for first initialization */ 461 /* Reset module and interrupts only for first initialization */
@@ -482,13 +483,18 @@ static int fsl_spdif_startup(struct snd_pcm_substream *substream,
482 mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK | 483 mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
483 SCR_TXSEL_MASK | SCR_USRC_SEL_MASK | 484 SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
484 SCR_TXFIFO_FSEL_MASK; 485 SCR_TXFIFO_FSEL_MASK;
485 for (i = 0; i < SPDIF_TXRATE_MAX; i++) 486 for (i = 0; i < SPDIF_TXRATE_MAX; i++) {
486 clk_prepare_enable(spdif_priv->txclk[i]); 487 ret = clk_prepare_enable(spdif_priv->txclk[i]);
488 if (ret)
489 goto disable_txclk;
490 }
487 } else { 491 } else {
488 scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC; 492 scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC;
489 mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK| 493 mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
490 SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK; 494 SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK;
491 clk_prepare_enable(spdif_priv->rxclk); 495 ret = clk_prepare_enable(spdif_priv->rxclk);
496 if (ret)
497 goto err;
492 } 498 }
493 regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr); 499 regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
494 500
@@ -497,6 +503,9 @@ static int fsl_spdif_startup(struct snd_pcm_substream *substream,
497 503
498 return 0; 504 return 0;
499 505
506disable_txclk:
507 for (i--; i >= 0; i--)
508 clk_disable_unprepare(spdif_priv->txclk[i]);
500err: 509err:
501 clk_disable_unprepare(spdif_priv->coreclk); 510 clk_disable_unprepare(spdif_priv->coreclk);
502 511
@@ -707,7 +716,7 @@ static int fsl_spdif_subcode_get(struct snd_kcontrol *kcontrol,
707 return ret; 716 return ret;
708} 717}
709 718
710/* Q-subcode infomation. The byte size is SPDIF_UBITS_SIZE/8 */ 719/* Q-subcode information. The byte size is SPDIF_UBITS_SIZE/8 */
711static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol, 720static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol,
712 struct snd_ctl_elem_info *uinfo) 721 struct snd_ctl_elem_info *uinfo)
713{ 722{
@@ -739,7 +748,7 @@ static int fsl_spdif_qget(struct snd_kcontrol *kcontrol,
739 return ret; 748 return ret;
740} 749}
741 750
742/* Valid bit infomation */ 751/* Valid bit information */
743static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol, 752static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol,
744 struct snd_ctl_elem_info *uinfo) 753 struct snd_ctl_elem_info *uinfo)
745{ 754{
@@ -767,7 +776,7 @@ static int fsl_spdif_vbit_get(struct snd_kcontrol *kcontrol,
767 return 0; 776 return 0;
768} 777}
769 778
770/* DPLL lock infomation */ 779/* DPLL lock information */
771static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol, 780static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol,
772 struct snd_ctl_elem_info *uinfo) 781 struct snd_ctl_elem_info *uinfo)
773{ 782{
@@ -1255,7 +1264,7 @@ static int fsl_spdif_probe(struct platform_device *pdev)
1255 return ret; 1264 return ret;
1256 } 1265 }
1257 1266
1258 ret = imx_pcm_dma_init(pdev); 1267 ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
1259 if (ret) 1268 if (ret)
1260 dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret); 1269 dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
1261 1270
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c7647e066cfd..8ec6fb208ea0 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -156,7 +156,7 @@ struct fsl_ssi_soc_data {
156 * 156 *
157 * @dbg_stats: Debugging statistics 157 * @dbg_stats: Debugging statistics
158 * 158 *
159 * @soc: SoC specifc data 159 * @soc: SoC specific data
160 */ 160 */
161struct fsl_ssi_private { 161struct fsl_ssi_private {
162 struct regmap *regs; 162 struct regmap *regs;
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
633 sub *= 100000; 633 sub *= 100000;
634 do_div(sub, freq); 634 do_div(sub, freq);
635 635
636 if (sub < savesub) { 636 if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
637 baudrate = tmprate; 637 baudrate = tmprate;
638 savesub = sub; 638 savesub = sub;
639 pm = i; 639 pm = i;
@@ -900,14 +900,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
900 scr &= ~CCSR_SSI_SCR_SYS_CLK_EN; 900 scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
901 break; 901 break;
902 default: 902 default:
903 return -EINVAL; 903 if (!fsl_ssi_is_ac97(ssi_private))
904 return -EINVAL;
904 } 905 }
905 906
906 stcr |= strcr; 907 stcr |= strcr;
907 srcr |= strcr; 908 srcr |= strcr;
908 909
909 if (ssi_private->cpu_dai_drv.symmetric_rates) { 910 if (ssi_private->cpu_dai_drv.symmetric_rates
910 /* Need to clear RXDIR when using SYNC mode */ 911 || fsl_ssi_is_ac97(ssi_private)) {
912 /* Need to clear RXDIR when using SYNC or AC97 mode */
911 srcr &= ~CCSR_SSI_SRCR_RXDIR; 913 srcr &= ~CCSR_SSI_SRCR_RXDIR;
912 scr |= CCSR_SSI_SCR_SYN; 914 scr |= CCSR_SSI_SCR_SYN;
913 } 915 }
@@ -1101,6 +1103,7 @@ static const struct snd_soc_component_driver fsl_ssi_component = {
1101 1103
1102static struct snd_soc_dai_driver fsl_ssi_ac97_dai = { 1104static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
1103 .bus_control = true, 1105 .bus_control = true,
1106 .probe = fsl_ssi_dai_probe,
1104 .playback = { 1107 .playback = {
1105 .stream_name = "AC97 Playback", 1108 .stream_name = "AC97 Playback",
1106 .channels_min = 2, 1109 .channels_min = 2,
@@ -1127,10 +1130,17 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
1127 struct regmap *regs = fsl_ac97_data->regs; 1130 struct regmap *regs = fsl_ac97_data->regs;
1128 unsigned int lreg; 1131 unsigned int lreg;
1129 unsigned int lval; 1132 unsigned int lval;
1133 int ret;
1130 1134
1131 if (reg > 0x7f) 1135 if (reg > 0x7f)
1132 return; 1136 return;
1133 1137
1138 ret = clk_prepare_enable(fsl_ac97_data->clk);
1139 if (ret) {
1140 pr_err("ac97 write clk_prepare_enable failed: %d\n",
1141 ret);
1142 return;
1143 }
1134 1144
1135 lreg = reg << 12; 1145 lreg = reg << 12;
1136 regmap_write(regs, CCSR_SSI_SACADD, lreg); 1146 regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1141,6 +1151,8 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
1141 regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK, 1151 regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
1142 CCSR_SSI_SACNT_WR); 1152 CCSR_SSI_SACNT_WR);
1143 udelay(100); 1153 udelay(100);
1154
1155 clk_disable_unprepare(fsl_ac97_data->clk);
1144} 1156}
1145 1157
1146static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97, 1158static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
@@ -1151,6 +1163,14 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
1151 unsigned short val = -1; 1163 unsigned short val = -1;
1152 u32 reg_val; 1164 u32 reg_val;
1153 unsigned int lreg; 1165 unsigned int lreg;
1166 int ret;
1167
1168 ret = clk_prepare_enable(fsl_ac97_data->clk);
1169 if (ret) {
1170 pr_err("ac97 read clk_prepare_enable failed: %d\n",
1171 ret);
1172 return -1;
1173 }
1154 1174
1155 lreg = (reg & 0x7f) << 12; 1175 lreg = (reg & 0x7f) << 12;
1156 regmap_write(regs, CCSR_SSI_SACADD, lreg); 1176 regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1162,6 +1182,8 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
1162 regmap_read(regs, CCSR_SSI_SACDAT, &reg_val); 1182 regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
1163 val = (reg_val >> 4) & 0xffff; 1183 val = (reg_val >> 4) & 0xffff;
1164 1184
1185 clk_disable_unprepare(fsl_ac97_data->clk);
1186
1165 return val; 1187 return val;
1166} 1188}
1167 1189
@@ -1210,7 +1232,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
1210 } 1232 }
1211 } 1233 }
1212 1234
1213 /* For those SLAVE implementations, we ingore non-baudclk cases 1235 /* For those SLAVE implementations, we ignore non-baudclk cases
1214 * and, instead, abandon MASTER mode that needs baud clock. 1236 * and, instead, abandon MASTER mode that needs baud clock.
1215 */ 1237 */
1216 ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud"); 1238 ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
@@ -1257,7 +1279,7 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
1257 if (ret) 1279 if (ret)
1258 goto error_pcm; 1280 goto error_pcm;
1259 } else { 1281 } else {
1260 ret = imx_pcm_dma_init(pdev); 1282 ret = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
1261 if (ret) 1283 if (ret)
1262 goto error_pcm; 1284 goto error_pcm;
1263 } 1285 }
@@ -1320,7 +1342,11 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1320 1342
1321 fsl_ac97_data = ssi_private; 1343 fsl_ac97_data = ssi_private;
1322 1344
1323 snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); 1345 ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
1346 if (ret) {
1347 dev_err(&pdev->dev, "could not set AC'97 ops\n");
1348 return ret;
1349 }
1324 } else { 1350 } else {
1325 /* Initialize this copy of the CPU DAI driver structure */ 1351 /* Initialize this copy of the CPU DAI driver structure */
1326 memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, 1352 memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1357,7 +1383,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1357 1383
1358 /* Are the RX and the TX clocks locked? */ 1384 /* Are the RX and the TX clocks locked? */
1359 if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) { 1385 if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
1360 ssi_private->cpu_dai_drv.symmetric_rates = 1; 1386 if (!fsl_ssi_is_ac97(ssi_private))
1387 ssi_private->cpu_dai_drv.symmetric_rates = 1;
1388
1361 ssi_private->cpu_dai_drv.symmetric_channels = 1; 1389 ssi_private->cpu_dai_drv.symmetric_channels = 1;
1362 ssi_private->cpu_dai_drv.symmetric_samplebits = 1; 1390 ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
1363 } 1391 }
@@ -1434,6 +1462,27 @@ done:
1434 _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private, 1462 _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
1435 ssi_private->dai_fmt); 1463 ssi_private->dai_fmt);
1436 1464
1465 if (fsl_ssi_is_ac97(ssi_private)) {
1466 u32 ssi_idx;
1467
1468 ret = of_property_read_u32(np, "cell-index", &ssi_idx);
1469 if (ret) {
1470 dev_err(&pdev->dev, "cannot get SSI index property\n");
1471 goto error_sound_card;
1472 }
1473
1474 ssi_private->pdev =
1475 platform_device_register_data(NULL,
1476 "ac97-codec", ssi_idx, NULL, 0);
1477 if (IS_ERR(ssi_private->pdev)) {
1478 ret = PTR_ERR(ssi_private->pdev);
1479 dev_err(&pdev->dev,
1480 "failed to register AC97 codec platform: %d\n",
1481 ret);
1482 goto error_sound_card;
1483 }
1484 }
1485
1437 return 0; 1486 return 0;
1438 1487
1439error_sound_card: 1488error_sound_card:
@@ -1458,6 +1507,9 @@ static int fsl_ssi_remove(struct platform_device *pdev)
1458 if (ssi_private->soc->imx) 1507 if (ssi_private->soc->imx)
1459 fsl_ssi_imx_clean(pdev, ssi_private); 1508 fsl_ssi_imx_clean(pdev, ssi_private);
1460 1509
1510 if (fsl_ssi_is_ac97(ssi_private))
1511 snd_soc_set_ac97_ops(NULL);
1512
1461 return 0; 1513 return 0;
1462} 1514}
1463 1515
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0db94f492e97..1fc01ed3279d 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -40,7 +40,7 @@ static const struct snd_pcm_hardware imx_pcm_hardware = {
40 SNDRV_PCM_INFO_MMAP_VALID | 40 SNDRV_PCM_INFO_MMAP_VALID |
41 SNDRV_PCM_INFO_PAUSE | 41 SNDRV_PCM_INFO_PAUSE |
42 SNDRV_PCM_INFO_RESUME, 42 SNDRV_PCM_INFO_RESUME,
43 .buffer_bytes_max = IMX_SSI_DMABUF_SIZE, 43 .buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
44 .period_bytes_min = 128, 44 .period_bytes_min = 128,
45 .period_bytes_max = 65535, /* Limited by SDMA engine */ 45 .period_bytes_max = 65535, /* Limited by SDMA engine */
46 .periods_min = 2, 46 .periods_min = 2,
@@ -52,13 +52,30 @@ static const struct snd_dmaengine_pcm_config imx_dmaengine_pcm_config = {
52 .pcm_hardware = &imx_pcm_hardware, 52 .pcm_hardware = &imx_pcm_hardware,
53 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, 53 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
54 .compat_filter_fn = filter, 54 .compat_filter_fn = filter,
55 .prealloc_buffer_size = IMX_SSI_DMABUF_SIZE, 55 .prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
56}; 56};
57 57
58int imx_pcm_dma_init(struct platform_device *pdev) 58int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
59{ 59{
60 struct snd_dmaengine_pcm_config *config;
61 struct snd_pcm_hardware *pcm_hardware;
62
63 config = devm_kzalloc(&pdev->dev,
64 sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
65 *config = imx_dmaengine_pcm_config;
66 if (size)
67 config->prealloc_buffer_size = size;
68
69 pcm_hardware = devm_kzalloc(&pdev->dev,
70 sizeof(struct snd_pcm_hardware), GFP_KERNEL);
71 *pcm_hardware = imx_pcm_hardware;
72 if (size)
73 pcm_hardware->buffer_bytes_max = size;
74
75 config->pcm_hardware = pcm_hardware;
76
60 return devm_snd_dmaengine_pcm_register(&pdev->dev, 77 return devm_snd_dmaengine_pcm_register(&pdev->dev,
61 &imx_dmaengine_pcm_config, 78 config,
62 SND_DMAENGINE_PCM_FLAG_COMPAT); 79 SND_DMAENGINE_PCM_FLAG_COMPAT);
63} 80}
64EXPORT_SYMBOL_GPL(imx_pcm_dma_init); 81EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
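
The reworked imx_pcm_dma_init() above copies the static template config into per-device (devm) memory so each caller can override the preallocated buffer size without touching the shared template. A condensed sketch of that idea, with hypothetical type and field names, and with the allocation checked (the hunk above does not check the devm_kzalloc() results), could be:

#include <linux/device.h>
#include <linux/slab.h>

struct pcm_cfg {
	size_t prealloc_buffer_size;
	/* ...other fields copied from the shared template... */
};

static const struct pcm_cfg default_cfg = {
	.prealloc_buffer_size = 64 * 1024,
};

/* Sketch: duplicate the const template per device so one caller's size
 * override cannot leak into the next caller; devm_kmemdup() ties the
 * lifetime to the device.
 */
static struct pcm_cfg *dup_cfg(struct device *dev, size_t size)
{
	struct pcm_cfg *cfg;

	cfg = devm_kmemdup(dev, &default_cfg, sizeof(default_cfg),
			   GFP_KERNEL);
	if (!cfg)
		return NULL;

	if (size)
		cfg->prealloc_buffer_size = size;

	return cfg;
}
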
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index c79cb27473be..133c4470acad 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -20,6 +20,11 @@
20 */ 20 */
21#define IMX_SSI_DMABUF_SIZE (64 * 1024) 21#define IMX_SSI_DMABUF_SIZE (64 * 1024)
22 22
23#define IMX_DEFAULT_DMABUF_SIZE (64 * 1024)
24#define IMX_SAI_DMABUF_SIZE (64 * 1024)
25#define IMX_SPDIF_DMABUF_SIZE (64 * 1024)
26#define IMX_ESAI_DMABUF_SIZE (256 * 1024)
27
23static inline void 28static inline void
24imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data, 29imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
25 int dma, enum sdma_peripheral_type peripheral_type) 30 int dma, enum sdma_peripheral_type peripheral_type)
@@ -39,9 +44,9 @@ struct imx_pcm_fiq_params {
39}; 44};
40 45
41#if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA) 46#if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
42int imx_pcm_dma_init(struct platform_device *pdev); 47int imx_pcm_dma_init(struct platform_device *pdev, size_t size);
43#else 48#else
44static inline int imx_pcm_dma_init(struct platform_device *pdev) 49static inline int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
45{ 50{
46 return -ENODEV; 51 return -ENODEV;
47} 52}
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 461ce27b884f..48b2d24dd1f0 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -603,7 +603,7 @@ static int imx_ssi_probe(struct platform_device *pdev)
603 ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx; 603 ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
604 604
605 ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params); 605 ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
606 ssi->dma_init = imx_pcm_dma_init(pdev); 606 ssi->dma_init = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
607 607
608 if (ssi->fiq_init && ssi->dma_init) { 608 if (ssi->fiq_init && ssi->dma_init) {
609 ret = ssi->fiq_init; 609 ret = ssi->fiq_init;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index d5554939146e..3ff76d419436 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -76,6 +76,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
76{ 76{
77 struct snd_soc_pcm_runtime *rtd = substream->private_data; 77 struct snd_soc_pcm_runtime *rtd = substream->private_data;
78 struct snd_soc_dai *codec_dai = rtd->codec_dai; 78 struct snd_soc_dai *codec_dai = rtd->codec_dai;
79 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
79 struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card); 80 struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
80 struct simple_dai_props *dai_props = 81 struct simple_dai_props *dai_props =
81 &priv->dai_props[rtd - rtd->card->rtd]; 82 &priv->dai_props[rtd - rtd->card->rtd];
@@ -91,8 +92,16 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
91 mclk = params_rate(params) * mclk_fs; 92 mclk = params_rate(params) * mclk_fs;
92 ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk, 93 ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
93 SND_SOC_CLOCK_IN); 94 SND_SOC_CLOCK_IN);
95 if (ret && ret != -ENOTSUPP)
96 goto err;
97
98 ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
99 SND_SOC_CLOCK_OUT);
100 if (ret && ret != -ENOTSUPP)
101 goto err;
94 } 102 }
95 103
104err:
96 return ret; 105 return ret;
97} 106}
98 107
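
The simple-card hunk above treats -ENOTSUPP from snd_soc_dai_set_sysclk() as harmless, since many DAIs simply do not implement a set_sysclk callback. A small sketch of that error policy (the wrapper is hypothetical):

#include <sound/soc.h>

/* Sketch: a DAI without a set_sysclk() op returns -ENOTSUPP, which the
 * card treats as "nothing to configure" rather than a fatal error.
 */
static int try_set_sysclk(struct snd_soc_dai *dai, unsigned int freq,
			  int dir)
{
	int ret = snd_soc_dai_set_sysclk(dai, 0, freq, dir);

	return (ret == -ENOTSUPP) ? 0 : ret;
}
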
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f3060a4ca040..05fde5e6e257 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -26,14 +26,9 @@ config SND_SST_IPC_ACPI
26 depends on ACPI 26 depends on ACPI
27 27
28config SND_SOC_INTEL_SST 28config SND_SOC_INTEL_SST
29 tristate "ASoC support for Intel(R) Smart Sound Technology" 29 tristate
30 select SND_SOC_INTEL_SST_ACPI if ACPI 30 select SND_SOC_INTEL_SST_ACPI if ACPI
31 depends on (X86 || COMPILE_TEST) 31 depends on (X86 || COMPILE_TEST)
32 depends on DW_DMAC_CORE
33 help
34 This adds support for Intel(R) Smart Sound Technology (SST).
35 Say Y if you have such a device
36 If unsure select "N".
37 32
38config SND_SOC_INTEL_SST_ACPI 33config SND_SOC_INTEL_SST_ACPI
39 tristate 34 tristate
@@ -46,8 +41,9 @@ config SND_SOC_INTEL_BAYTRAIL
46 41
47config SND_SOC_INTEL_HASWELL_MACH 42config SND_SOC_INTEL_HASWELL_MACH
48 tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint" 43 tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
49 depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \ 44 depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
50 I2C_DESIGNWARE_PLATFORM 45 depends on DW_DMAC_CORE
46 select SND_SOC_INTEL_SST
51 select SND_SOC_INTEL_HASWELL 47 select SND_SOC_INTEL_HASWELL
52 select SND_SOC_RT5640 48 select SND_SOC_RT5640
53 help 49 help
@@ -58,7 +54,9 @@ config SND_SOC_INTEL_HASWELL_MACH
58 54
59config SND_SOC_INTEL_BYT_RT5640_MACH 55config SND_SOC_INTEL_BYT_RT5640_MACH
60 tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec" 56 tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
61 depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C 57 depends on X86_INTEL_LPSS && I2C
58 depends on DW_DMAC_CORE
59 select SND_SOC_INTEL_SST
62 select SND_SOC_INTEL_BAYTRAIL 60 select SND_SOC_INTEL_BAYTRAIL
63 select SND_SOC_RT5640 61 select SND_SOC_RT5640
64 help 62 help
@@ -67,7 +65,9 @@ config SND_SOC_INTEL_BYT_RT5640_MACH
67 65
68config SND_SOC_INTEL_BYT_MAX98090_MACH 66config SND_SOC_INTEL_BYT_MAX98090_MACH
69 tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec" 67 tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
70 depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C 68 depends on X86_INTEL_LPSS && I2C
69 depends on DW_DMAC_CORE
70 select SND_SOC_INTEL_SST
71 select SND_SOC_INTEL_BAYTRAIL 71 select SND_SOC_INTEL_BAYTRAIL
72 select SND_SOC_MAX98090 72 select SND_SOC_MAX98090
73 help 73 help
@@ -76,8 +76,10 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
76 76
77config SND_SOC_INTEL_BROADWELL_MACH 77config SND_SOC_INTEL_BROADWELL_MACH
78 tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint" 78 tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
79 depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \ 79 depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
80 I2C_DESIGNWARE_PLATFORM 80 I2C_DESIGNWARE_PLATFORM
81 depends on DW_DMAC_CORE
82 select SND_SOC_INTEL_SST
81 select SND_SOC_INTEL_HASWELL 83 select SND_SOC_INTEL_HASWELL
82 select SND_SOC_RT286 84 select SND_SOC_RT286
83 help 85 help
@@ -132,3 +134,8 @@ config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
132 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell 134 This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
133 platforms with MAX98090 audio codec it also can support TI jack chip as aux device. 135 platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
134 If unsure select "N". 136 If unsure select "N".
137
138config SND_SOC_INTEL_SKYLAKE
139 tristate
140 select SND_HDA_EXT_CORE
141 select SND_SOC_INTEL_SST
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 3853ec2ddbc7..2b45435e6245 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SST) += common/
5obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/ 5obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
6obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/ 6obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ 7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
8obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
8 9
9# Machine support 10# Machine support
10obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ 11obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 31e9b9ecbb8a..d55388e082e1 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -132,7 +132,7 @@ static int sst_send_slot_map(struct sst_data *drv)
132 sizeof(cmd.header) + cmd.header.length); 132 sizeof(cmd.header) + cmd.header.length);
133} 133}
134 134
135int sst_slot_enum_info(struct snd_kcontrol *kcontrol, 135static int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
136 struct snd_ctl_elem_info *uinfo) 136 struct snd_ctl_elem_info *uinfo)
137{ 137{
138 struct sst_enum *e = (struct sst_enum *)kcontrol->private_value; 138 struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
@@ -1298,7 +1298,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
1298 dev_dbg(dai->dev, "Stream name=%s\n", 1298 dev_dbg(dai->dev, "Stream name=%s\n",
1299 dai->playback_widget->name); 1299 dai->playback_widget->name);
1300 w = dai->playback_widget; 1300 w = dai->playback_widget;
1301 list_for_each_entry(p, &w->sinks, list_source) { 1301 snd_soc_dapm_widget_for_each_sink_path(w, p) {
1302 if (p->connected && !p->connected(w, p->sink)) 1302 if (p->connected && !p->connected(w, p->sink))
1303 continue; 1303 continue;
1304 1304
@@ -1317,7 +1317,7 @@ int sst_send_pipe_gains(struct snd_soc_dai *dai, int stream, int mute)
1317 dev_dbg(dai->dev, "Stream name=%s\n", 1317 dev_dbg(dai->dev, "Stream name=%s\n",
1318 dai->capture_widget->name); 1318 dai->capture_widget->name);
1319 w = dai->capture_widget; 1319 w = dai->capture_widget;
1320 list_for_each_entry(p, &w->sources, list_sink) { 1320 snd_soc_dapm_widget_for_each_source_path(w, p) {
1321 if (p->connected && !p->connected(w, p->sink)) 1321 if (p->connected && !p->connected(w, p->sink))
1322 continue; 1322 continue;
1323 1323
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 641ebe61dc08..683e50116152 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -33,7 +33,6 @@
33 33
34struct sst_device *sst; 34struct sst_device *sst;
35static DEFINE_MUTEX(sst_lock); 35static DEFINE_MUTEX(sst_lock);
36extern struct snd_compr_ops sst_platform_compr_ops;
37 36
38int sst_register_dsp(struct sst_device *dev) 37int sst_register_dsp(struct sst_device *dev)
39{ 38{
diff --git a/sound/soc/intel/atom/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index 2409b23eeacf..cb32cc7e5ec1 100644
--- a/sound/soc/intel/atom/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -25,6 +25,7 @@
25#include "sst-atom-controls.h" 25#include "sst-atom-controls.h"
26 26
27extern struct sst_device *sst; 27extern struct sst_device *sst;
28extern struct snd_compr_ops sst_platform_compr_ops;
28 29
29#define SST_MONO 1 30#define SST_MONO 1
30#define SST_STEREO 2 31#define SST_STEREO 2
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 620da1d1b9e3..ce689c5af5ab 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -42,6 +42,11 @@
42#define MIN_FRAGMENT_SIZE (50 * 1024) 42#define MIN_FRAGMENT_SIZE (50 * 1024)
43#define MAX_FRAGMENT_SIZE (1024 * 1024) 43#define MAX_FRAGMENT_SIZE (1024 * 1024)
44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1) 44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
45#ifdef CONFIG_PM
46#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
47#else
48#define GET_USAGE_COUNT(dev) 1
49#endif
45 50
46int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) 51int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
47{ 52{
@@ -141,17 +146,12 @@ static int sst_power_control(struct device *dev, bool state)
141 int ret = 0; 146 int ret = 0;
142 int usage_count = 0; 147 int usage_count = 0;
143 148
144#ifdef CONFIG_PM
145 usage_count = atomic_read(&dev->power.usage_count);
146#else
147 usage_count = 1;
148#endif
149
150 if (state == true) { 149 if (state == true) {
151 ret = pm_runtime_get_sync(dev); 150 ret = pm_runtime_get_sync(dev);
152 151 usage_count = GET_USAGE_COUNT(dev);
153 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); 152 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
154 if (ret < 0) { 153 if (ret < 0) {
154 pm_runtime_put_sync(dev);
155 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); 155 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
156 return ret; 156 return ret;
157 } 157 }
@@ -164,6 +164,7 @@ static int sst_power_control(struct device *dev, bool state)
164 } 164 }
165 } 165 }
166 } else { 166 } else {
167 usage_count = GET_USAGE_COUNT(dev);
167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); 168 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
168 return sst_pm_runtime_put(ctx); 169 return sst_pm_runtime_put(ctx);
169 } 170 }
@@ -204,8 +205,10 @@ static int sst_cdev_open(struct device *dev,
204 struct intel_sst_drv *ctx = dev_get_drvdata(dev); 205 struct intel_sst_drv *ctx = dev_get_drvdata(dev);
205 206
206 retval = pm_runtime_get_sync(ctx->dev); 207 retval = pm_runtime_get_sync(ctx->dev);
207 if (retval < 0) 208 if (retval < 0) {
209 pm_runtime_put_sync(ctx->dev);
208 return retval; 210 return retval;
211 }
209 212
210 str_id = sst_get_stream(ctx, str_params); 213 str_id = sst_get_stream(ctx, str_params);
211 if (str_id > 0) { 214 if (str_id > 0) {
@@ -672,8 +675,10 @@ static int sst_send_byte_stream(struct device *dev,
672 if (NULL == bytes) 675 if (NULL == bytes)
673 return -EINVAL; 676 return -EINVAL;
674 ret_val = pm_runtime_get_sync(ctx->dev); 677 ret_val = pm_runtime_get_sync(ctx->dev);
675 if (ret_val < 0) 678 if (ret_val < 0) {
679 pm_runtime_put_sync(ctx->dev);
676 return ret_val; 680 return ret_val;
681 }
677 682
678 ret_val = sst_send_byte_stream_mrfld(ctx, bytes); 683 ret_val = sst_send_byte_stream_mrfld(ctx, bytes);
679 sst_pm_runtime_put(ctx); 684 sst_pm_runtime_put(ctx);
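
The sst_drv_interface hunks above add pm_runtime_put_sync() on the failure paths because pm_runtime_get_sync() increments the device usage count even when it returns an error. A minimal standalone sketch of that rule:

#include <linux/pm_runtime.h>

/* Sketch: pm_runtime_get_sync() bumps the usage count even on failure,
 * so the error path must drop the reference before returning, as the
 * hunks above now do.
 */
static int get_device_powered(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_sync(dev);
		return ret;
	}
	return 0;
}
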
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 5a278618466c..3dc7358828b3 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -352,10 +352,9 @@ void sst_process_reply_mrfld(struct intel_sst_drv *sst_drv_ctx,
352 * copy from mailbox 352 * copy from mailbox
353 **/ 353 **/
354 if (msg_high.part.large) { 354 if (msg_high.part.large) {
355 data = kzalloc(msg_low, GFP_KERNEL); 355 data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
356 if (!data) 356 if (!data)
357 return; 357 return;
358 memcpy(data, (void *) msg->mailbox_data, msg_low);
359 /* Copy command id so that we can use to put sst to reset */ 358 /* Copy command id so that we can use to put sst to reset */
360 dsp_hdr = (struct ipc_dsp_hdr *)data; 359 dsp_hdr = (struct ipc_dsp_hdr *)data;
361 cmd_id = dsp_hdr->cmd_id; 360 cmd_id = dsp_hdr->cmd_id;
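
The sst_ipc hunk above replaces a kzalloc()+memcpy() pair with kmemdup(), which allocates and copies in one step. A tiny sketch of the same simplification, with a hypothetical helper name:

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch: the old code was kzalloc(len) followed by memcpy() from the
 * mailbox; kmemdup() does both and returns NULL on allocation failure.
 */
static void *dup_mailbox(const void *mbox, size_t len)
{
	return kmemdup(mbox, len, GFP_KERNEL);
}
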
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
701 if (byt == NULL) 701 if (byt == NULL)
702 return -ENOMEM; 702 return -ENOMEM;
703 703
704 byt->dev = dev;
705
704 ipc = &byt->ipc; 706 ipc = &byt->ipc;
705 ipc->dev = dev; 707 ipc->dev = dev;
706 ipc->ops.tx_msg = byt_tx_msg; 708 ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/boards/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index 7ab8cc9fbfd5..d9f81b8d915d 100644
--- a/sound/soc/intel/boards/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -126,6 +126,7 @@ static struct snd_soc_dai_link byt_max98090_dais[] = {
126 126
127static struct snd_soc_card byt_max98090_card = { 127static struct snd_soc_card byt_max98090_card = {
128 .name = "byt-max98090", 128 .name = "byt-max98090",
129 .owner = THIS_MODULE,
129 .dai_link = byt_max98090_dais, 130 .dai_link = byt_max98090_dais,
130 .num_links = ARRAY_SIZE(byt_max98090_dais), 131 .num_links = ARRAY_SIZE(byt_max98090_dais),
131 .dapm_widgets = byt_max98090_widgets, 132 .dapm_widgets = byt_max98090_widgets,
diff --git a/sound/soc/intel/boards/byt-rt5640.c b/sound/soc/intel/boards/byt-rt5640.c
index ae89b9b966d9..de9788a3fd06 100644
--- a/sound/soc/intel/boards/byt-rt5640.c
+++ b/sound/soc/intel/boards/byt-rt5640.c
@@ -197,6 +197,7 @@ static struct snd_soc_dai_link byt_rt5640_dais[] = {
197 197
198static struct snd_soc_card byt_rt5640_card = { 198static struct snd_soc_card byt_rt5640_card = {
199 .name = "byt-rt5640", 199 .name = "byt-rt5640",
200 .owner = THIS_MODULE,
200 .dai_link = byt_rt5640_dais, 201 .dai_link = byt_rt5640_dais,
201 .num_links = ARRAY_SIZE(byt_rt5640_dais), 202 .num_links = ARRAY_SIZE(byt_rt5640_dais),
202 .dapm_widgets = byt_rt5640_widgets, 203 .dapm_widgets = byt_rt5640_widgets,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7f55d59024a8..c4453120b11a 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -185,6 +185,7 @@ static struct snd_soc_dai_link byt_dailink[] = {
185/* SoC card */ 185/* SoC card */
186static struct snd_soc_card snd_soc_card_byt = { 186static struct snd_soc_card snd_soc_card_byt = {
187 .name = "baytrailcraudio", 187 .name = "baytrailcraudio",
188 .owner = THIS_MODULE,
188 .dai_link = byt_dailink, 189 .dai_link = byt_dailink,
189 .num_links = ARRAY_SIZE(byt_dailink), 190 .num_links = ARRAY_SIZE(byt_dailink),
190 .dapm_widgets = byt_dapm_widgets, 191 .dapm_widgets = byt_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index d604ee80eda4..49f4869cec48 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
69 {"Headphone", NULL, "HPR"}, 69 {"Headphone", NULL, "HPR"},
70 {"Ext Spk", NULL, "SPKL"}, 70 {"Ext Spk", NULL, "SPKL"},
71 {"Ext Spk", NULL, "SPKR"}, 71 {"Ext Spk", NULL, "SPKR"},
72 {"AIF1 Playback", NULL, "ssp2 Tx"}, 72 {"HiFi Playback", NULL, "ssp2 Tx"},
73 {"ssp2 Tx", NULL, "codec_out0"}, 73 {"ssp2 Tx", NULL, "codec_out0"},
74 {"ssp2 Tx", NULL, "codec_out1"}, 74 {"ssp2 Tx", NULL, "codec_out1"},
75 {"codec_in0", NULL, "ssp2 Rx" }, 75 {"codec_in0", NULL, "ssp2 Rx" },
76 {"codec_in1", NULL, "ssp2 Rx" }, 76 {"codec_in1", NULL, "ssp2 Rx" },
77 {"ssp2 Rx", NULL, "AIF1 Capture"}, 77 {"ssp2 Rx", NULL, "HiFi Capture"},
78}; 78};
79 79
80static const struct snd_kcontrol_new cht_mc_controls[] = { 80static const struct snd_kcontrol_new cht_mc_controls[] = {
@@ -104,21 +104,17 @@ static int cht_aif1_hw_params(struct snd_pcm_substream *substream,
104static int cht_ti_jack_event(struct notifier_block *nb, 104static int cht_ti_jack_event(struct notifier_block *nb,
105 unsigned long event, void *data) 105 unsigned long event, void *data)
106{ 106{
107
108 struct snd_soc_jack *jack = (struct snd_soc_jack *)data; 107 struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
109 struct snd_soc_dai *codec_dai = jack->card->rtd->codec_dai; 108 struct snd_soc_dapm_context *dapm = &jack->card->dapm;
110 struct snd_soc_codec *codec = codec_dai->codec;
111 109
112 if (event & SND_JACK_MICROPHONE) { 110 if (event & SND_JACK_MICROPHONE) {
113 111 snd_soc_dapm_force_enable_pin(dapm, "SHDN");
114 snd_soc_dapm_force_enable_pin(&codec->dapm, "SHDN"); 112 snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
115 snd_soc_dapm_force_enable_pin(&codec->dapm, "MICBIAS"); 113 snd_soc_dapm_sync(dapm);
116 snd_soc_dapm_sync(&codec->dapm);
117 } else { 114 } else {
118 115 snd_soc_dapm_disable_pin(dapm, "MICBIAS");
119 snd_soc_dapm_disable_pin(&codec->dapm, "MICBIAS"); 116 snd_soc_dapm_disable_pin(dapm, "SHDN");
120 snd_soc_dapm_disable_pin(&codec->dapm, "SHDN"); 117 snd_soc_dapm_sync(dapm);
121 snd_soc_dapm_sync(&codec->dapm);
122 } 118 }
123 119
124 return 0; 120 return 0;
@@ -279,6 +275,7 @@ static struct snd_soc_dai_link cht_dailink[] = {
279/* SoC card */ 275/* SoC card */
280static struct snd_soc_card snd_soc_card_cht = { 276static struct snd_soc_card snd_soc_card_cht = {
281 .name = "chtmax98090", 277 .name = "chtmax98090",
278 .owner = THIS_MODULE,
282 .dai_link = cht_dailink, 279 .dai_link = cht_dailink,
283 .num_links = ARRAY_SIZE(cht_dailink), 280 .num_links = ARRAY_SIZE(cht_dailink),
284 .aux_dev = &cht_max98090_headset_dev, 281 .aux_dev = &cht_max98090_headset_dev,
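
The cht_bsw_max98090_ti hunk above reworks the TI jack notifier to toggle DAPM pins through the card-level context taken from the jack (jack->card->dapm) instead of the codec's private context. A condensed sketch of that shape, keeping only the MICBIAS pin for brevity:

#include <linux/notifier.h>
#include <sound/jack.h>
#include <sound/soc.h>

/* Sketch: flip pins via the card's DAPM context from the jack, then
 * sync, mirroring the reworked notifier above.
 */
static int jack_event(struct notifier_block *nb, unsigned long event,
		      void *data)
{
	struct snd_soc_jack *jack = data;
	struct snd_soc_dapm_context *dapm = &jack->card->dapm;

	if (event & SND_JACK_MICROPHONE)
		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
	else
		snd_soc_dapm_disable_pin(dapm, "MICBIAS");

	snd_soc_dapm_sync(dapm);

	return 0;
}
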
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index bdcaf467842a..7be8461e4d3b 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -305,6 +305,7 @@ static struct snd_soc_dai_link cht_dailink[] = {
305/* SoC card */ 305/* SoC card */
306static struct snd_soc_card snd_soc_card_chtrt5645 = { 306static struct snd_soc_card snd_soc_card_chtrt5645 = {
307 .name = "chtrt5645", 307 .name = "chtrt5645",
308 .owner = THIS_MODULE,
308 .dai_link = cht_dailink, 309 .dai_link = cht_dailink,
309 .num_links = ARRAY_SIZE(cht_dailink), 310 .num_links = ARRAY_SIZE(cht_dailink),
310 .dapm_widgets = cht_dapm_widgets, 311 .dapm_widgets = cht_dapm_widgets,
@@ -317,6 +318,7 @@ static struct snd_soc_card snd_soc_card_chtrt5645 = {
317 318
318static struct snd_soc_card snd_soc_card_chtrt5650 = { 319static struct snd_soc_card snd_soc_card_chtrt5650 = {
319 .name = "chtrt5650", 320 .name = "chtrt5650",
321 .owner = THIS_MODULE,
320 .dai_link = cht_dailink, 322 .dai_link = cht_dailink,
321 .num_links = ARRAY_SIZE(cht_dailink), 323 .num_links = ARRAY_SIZE(cht_dailink),
322 .dapm_widgets = cht_dapm_widgets, 324 .dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 2c9cc5be439e..23fe04075142 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -323,6 +323,7 @@ static int cht_resume_post(struct snd_soc_card *card)
323/* SoC card */ 323/* SoC card */
324static struct snd_soc_card snd_soc_card_cht = { 324static struct snd_soc_card snd_soc_card_cht = {
325 .name = "cherrytrailcraudio", 325 .name = "cherrytrailcraudio",
326 .owner = THIS_MODULE,
326 .dai_link = cht_dailink, 327 .dai_link = cht_dailink,
327 .num_links = ARRAY_SIZE(cht_dailink), 328 .num_links = ARRAY_SIZE(cht_dailink),
328 .dapm_widgets = cht_dapm_widgets, 329 .dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index 396d54510350..cbd568eac033 100644
--- a/sound/soc/intel/common/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -22,6 +22,8 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/firmware.h> 23#include <linux/firmware.h>
24 24
25#include "../skylake/skl-sst-dsp.h"
26
25struct sst_mem_block; 27struct sst_mem_block;
26struct sst_module; 28struct sst_module;
27struct sst_fw; 29struct sst_fw;
@@ -258,6 +260,8 @@ struct sst_mem_block {
258 */ 260 */
259struct sst_dsp { 261struct sst_dsp {
260 262
263 /* Shared for all platforms */
264
261 /* runtime */ 265 /* runtime */
262 struct sst_dsp_device *sst_dev; 266 struct sst_dsp_device *sst_dev;
263 spinlock_t spinlock; /* IPC locking */ 267 spinlock_t spinlock; /* IPC locking */
@@ -268,10 +272,6 @@ struct sst_dsp {
268 int irq; 272 int irq;
269 u32 id; 273 u32 id;
270 274
271 /* list of free and used ADSP memory blocks */
272 struct list_head used_block_list;
273 struct list_head free_block_list;
274
275 /* operations */ 275 /* operations */
276 struct sst_ops *ops; 276 struct sst_ops *ops;
277 277
@@ -284,6 +284,12 @@ struct sst_dsp {
284 /* mailbox */ 284 /* mailbox */
285 struct sst_mailbox mailbox; 285 struct sst_mailbox mailbox;
286 286
287 /* HSW/Byt data */
288
289 /* list of free and used ADSP memory blocks */
290 struct list_head used_block_list;
291 struct list_head free_block_list;
292
287 /* SST FW files loaded and their modules */ 293 /* SST FW files loaded and their modules */
288 struct list_head module_list; 294 struct list_head module_list;
289 struct list_head fw_list; 295 struct list_head fw_list;
@@ -299,6 +305,15 @@ struct sst_dsp {
299 /* DMA FW loading */ 305 /* DMA FW loading */
300 struct sst_dma *dma; 306 struct sst_dma *dma;
301 bool fw_use_dma; 307 bool fw_use_dma;
308
309 /* SKL data */
310
311 /* To allocate CL dma buffers */
312 struct skl_dsp_loader_ops dsp_ops;
313 struct skl_dsp_fw_ops fw_ops;
314 int sst_state;
315 struct skl_cl_dev cl_dev;
316 u32 intr_status;
302}; 317};
303 318
304/* Size optimised DRAM/IRAM memcpy */ 319/* Size optimised DRAM/IRAM memcpy */
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 64e94212d2d2..a627236dd1f5 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -20,6 +20,7 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/delay.h>
23 24
24#include "sst-dsp.h" 25#include "sst-dsp.h"
25#include "sst-dsp-priv.h" 26#include "sst-dsp-priv.h"
@@ -196,6 +197,22 @@ int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
196} 197}
197EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked); 198EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked);
198 199
200/* This is for registers bits with attribute RWC */
201void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
202 u32 mask, u32 value)
203{
204 unsigned int old, new;
205 u32 ret;
206
207 ret = sst_dsp_shim_read_unlocked(sst, offset);
208
209 old = ret;
210 new = (old & (~mask)) | (value & mask);
211
212 sst_dsp_shim_write_unlocked(sst, offset, new);
213}
214EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced_unlocked);
215
199int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset, 216int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
200 u32 mask, u32 value) 217 u32 mask, u32 value)
201{ 218{
@@ -222,6 +239,60 @@ int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
222} 239}
223EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64); 240EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64);
224 241
242/* This is for registers bits with attribute RWC */
243void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
244 u32 mask, u32 value)
245{
246 unsigned long flags;
247
248 spin_lock_irqsave(&sst->spinlock, flags);
249 sst_dsp_shim_update_bits_forced_unlocked(sst, offset, mask, value);
250 spin_unlock_irqrestore(&sst->spinlock, flags);
251}
252EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced);
253
254int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
255 u32 target, u32 timeout, char *operation)
256{
257 int time, ret;
258 u32 reg;
259 bool done = false;
260
261 /*
262 * we will poll for couple of ms using mdelay, if not successful
263 * then go to longer sleep using usleep_range
264 */
265
266 /* check if set state successful */
267 for (time = 0; time < 5; time++) {
268 if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target) {
269 done = true;
270 break;
271 }
272 mdelay(1);
273 }
274
275 if (done == false) {
276 /* sleeping in 10ms steps so adjust timeout value */
277 timeout /= 10;
278
279 for (time = 0; time < timeout; time++) {
280 if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target)
281 break;
282
283 usleep_range(5000, 10000);
284 }
285 }
286
287 reg = sst_dsp_shim_read_unlocked(ctx, offset);
288 dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
289 (time < timeout) ? "successful" : "timedout");
290 ret = time < timeout ? 0 : -ETIME;
291
292 return ret;
293}
294EXPORT_SYMBOL_GPL(sst_dsp_register_poll);
295
225void sst_dsp_dump(struct sst_dsp *sst) 296void sst_dsp_dump(struct sst_dsp *sst)
226{ 297{
227 if (sst->ops->dump) 298 if (sst->ops->dump)
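
The new sst_dsp_register_poll() helper added above busy-waits briefly with mdelay() and then falls back to usleep_range() steps until a register field reaches a target value or the timeout expires. A hypothetical caller might wait for a "core ready" bit after a wake request; the register offset and bit below are made-up values for illustration only:

#include <linux/bitops.h>
#include "sst-dsp.h"

/* Hypothetical offset/bit, not taken from real Skylake registers. */
#define HYP_STATUS_OFFSET	0x04
#define HYP_CORE_READY		BIT(0)

/* Sketch: wait up to ~300 ms for the ready bit to latch, logging the
 * operation name that the helper prints in its status message.
 */
static int wait_core_ready(struct sst_dsp *ctx)
{
	return sst_dsp_register_poll(ctx, HYP_STATUS_OFFSET,
				     HYP_CORE_READY, HYP_CORE_READY,
				     300, "core wake");
}
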
diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h
index 96aeb2556ad4..1f45f18715c0 100644
--- a/sound/soc/intel/common/sst-dsp.h
+++ b/sound/soc/intel/common/sst-dsp.h
@@ -230,6 +230,8 @@ void sst_dsp_shim_write64(struct sst_dsp *sst, u32 offset, u64 value);
230u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset); 230u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
231int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset, 231int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
232 u64 mask, u64 value); 232 u64 mask, u64 value);
233void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
234 u32 mask, u32 value);
233 235
234/* SHIM Read / Write Unlocked for callers already holding sst lock */ 236/* SHIM Read / Write Unlocked for callers already holding sst lock */
235void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value); 237void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
@@ -240,6 +242,8 @@ void sst_dsp_shim_write64_unlocked(struct sst_dsp *sst, u32 offset, u64 value);
240u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset); 242u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
241int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset, 243int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
242 u64 mask, u64 value); 244 u64 mask, u64 value);
245void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
246 u32 mask, u32 value);
243 247
244/* Internal generic low-level SST IO functions - can be overidden */ 248/* Internal generic low-level SST IO functions - can be overidden */
245void sst_shim32_write(void __iomem *addr, u32 offset, u32 value); 249void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
@@ -278,6 +282,8 @@ void sst_dsp_inbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
278void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes); 282void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
279void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes); 283void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
280void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes); 284void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
285int sst_dsp_register_poll(struct sst_dsp *dsp, u32 offset, u32 mask,
286 u32 expected_value, u32 timeout, char *operation);
281 287
282/* Debug */ 288/* Debug */
283void sst_dsp_dump(struct sst_dsp *sst); 289void sst_dsp_dump(struct sst_dsp *sst);
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
2119 if (hsw == NULL) 2119 if (hsw == NULL)
2120 return -ENOMEM; 2120 return -ENOMEM;
2121 2121
2122 hsw->dev = dev;
2123
2122 ipc = &hsw->ipc; 2124 ipc = &hsw->ipc;
2123 ipc->dev = dev; 2125 ipc->dev = dev;
2124 ipc->ops.tx_msg = hsw_tx_msg; 2126 ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
new file mode 100644
index 000000000000..27db22178204
--- /dev/null
+++ b/sound/soc/intel/skylake/Makefile
@@ -0,0 +1,9 @@
1snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o
2
3obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
4
5# Skylake IPC Support
6snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
7 skl-sst.o
8
9obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
new file mode 100644
index 000000000000..826d4fd8930a
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -0,0 +1,884 @@
1/*
2 * skl-message.c - HDA DSP interface for FW registration, Pipe and Module
3 * configurations
4 *
5 * Copyright (C) 2015 Intel Corp
6 * Author:Rafal Redzimski <rafal.f.redzimski@intel.com>
7 * Jeeja KP <jeeja.kp@intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as version 2, as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */
19
20#include <linux/slab.h>
21#include <linux/pci.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include "skl-sst-dsp.h"
25#include "skl-sst-ipc.h"
26#include "skl.h"
27#include "../common/sst-dsp.h"
28#include "../common/sst-dsp-priv.h"
29#include "skl-topology.h"
30#include "skl-tplg-interface.h"
31
32static int skl_alloc_dma_buf(struct device *dev,
33 struct snd_dma_buffer *dmab, size_t size)
34{
35 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
36 struct hdac_bus *bus = ebus_to_hbus(ebus);
37
38 if (!bus)
39 return -ENODEV;
40
41 return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
42}
43
44static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
45{
46 struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
47 struct hdac_bus *bus = ebus_to_hbus(ebus);
48
49 if (!bus)
50 return -ENODEV;
51
52 bus->io_ops->dma_free_pages(bus, dmab);
53
54 return 0;
55}
56
57int skl_init_dsp(struct skl *skl)
58{
59 void __iomem *mmio_base;
60 struct hdac_ext_bus *ebus = &skl->ebus;
61 struct hdac_bus *bus = ebus_to_hbus(ebus);
62 int irq = bus->irq;
63 struct skl_dsp_loader_ops loader_ops;
64 int ret;
65
66 loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
67 loader_ops.free_dma_buf = skl_free_dma_buf;
68
69 /* enable ppcap interrupt */
70 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
71 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
72
73 /* read the BAR of the ADSP MMIO */
74 mmio_base = pci_ioremap_bar(skl->pci, 4);
75 if (mmio_base == NULL) {
76 dev_err(bus->dev, "ioremap error\n");
77 return -ENXIO;
78 }
79
80 ret = skl_sst_dsp_init(bus->dev, mmio_base, irq,
81 loader_ops, &skl->skl_sst);
82
83 dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
84
85 return ret;
86}
87
88void skl_free_dsp(struct skl *skl)
89{
90 struct hdac_ext_bus *ebus = &skl->ebus;
91 struct hdac_bus *bus = ebus_to_hbus(ebus);
92 struct skl_sst *ctx = skl->skl_sst;
93
94 /* disable ppcap interrupt */
95 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
96
97 skl_sst_dsp_cleanup(bus->dev, ctx);
98 if (ctx->dsp->addr.lpe)
99 iounmap(ctx->dsp->addr.lpe);
100}
101
102int skl_suspend_dsp(struct skl *skl)
103{
104 struct skl_sst *ctx = skl->skl_sst;
105 int ret;
106
107 /* if ppcap is not supported return 0 */
108 if (!skl->ebus.ppcap)
109 return 0;
110
111 ret = skl_dsp_sleep(ctx->dsp);
112 if (ret < 0)
113 return ret;
114
115 /* disable ppcap interrupt */
116 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
117 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
118
119 return 0;
120}
121
122int skl_resume_dsp(struct skl *skl)
123{
124 struct skl_sst *ctx = skl->skl_sst;
125
126 /* if ppcap is not supported return 0 */
127 if (!skl->ebus.ppcap)
128 return 0;
129
130 /* enable ppcap interrupt */
131 snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
132 snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
133
134 return skl_dsp_wake(ctx->dsp);
135}
136
137enum skl_bitdepth skl_get_bit_depth(int params)
138{
139 switch (params) {
140 case 8:
141 return SKL_DEPTH_8BIT;
142
143 case 16:
144 return SKL_DEPTH_16BIT;
145
146 case 24:
147 return SKL_DEPTH_24BIT;
148
149 case 32:
150 return SKL_DEPTH_32BIT;
151
152 default:
153 return SKL_DEPTH_INVALID;
154
155 }
156}
157
158static u32 skl_create_channel_map(enum skl_ch_cfg ch_cfg)
159{
160 u32 config;
161
162 switch (ch_cfg) {
163 case SKL_CH_CFG_MONO:
164 config = (0xFFFFFFF0 | SKL_CHANNEL_LEFT);
165 break;
166
167 case SKL_CH_CFG_STEREO:
168 config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
169 | (SKL_CHANNEL_RIGHT << 4));
170 break;
171
172 case SKL_CH_CFG_2_1:
173 config = (0xFFFFF000 | SKL_CHANNEL_LEFT
174 | (SKL_CHANNEL_RIGHT << 4)
175 | (SKL_CHANNEL_LFE << 8));
176 break;
177
178 case SKL_CH_CFG_3_0:
179 config = (0xFFFFF000 | SKL_CHANNEL_LEFT
180 | (SKL_CHANNEL_CENTER << 4)
181 | (SKL_CHANNEL_RIGHT << 8));
182 break;
183
184 case SKL_CH_CFG_3_1:
185 config = (0xFFFF0000 | SKL_CHANNEL_LEFT
186 | (SKL_CHANNEL_CENTER << 4)
187 | (SKL_CHANNEL_RIGHT << 8)
188 | (SKL_CHANNEL_LFE << 12));
189 break;
190
191 case SKL_CH_CFG_QUATRO:
192 config = (0xFFFF0000 | SKL_CHANNEL_LEFT
193 | (SKL_CHANNEL_RIGHT << 4)
194 | (SKL_CHANNEL_LEFT_SURROUND << 8)
195 | (SKL_CHANNEL_RIGHT_SURROUND << 12));
196 break;
197
198 case SKL_CH_CFG_4_0:
199 config = (0xFFFF0000 | SKL_CHANNEL_LEFT
200 | (SKL_CHANNEL_CENTER << 4)
201 | (SKL_CHANNEL_RIGHT << 8)
202 | (SKL_CHANNEL_CENTER_SURROUND << 12));
203 break;
204
205 case SKL_CH_CFG_5_0:
206 config = (0xFFF00000 | SKL_CHANNEL_LEFT
207 | (SKL_CHANNEL_CENTER << 4)
208 | (SKL_CHANNEL_RIGHT << 8)
209 | (SKL_CHANNEL_LEFT_SURROUND << 12)
210 | (SKL_CHANNEL_RIGHT_SURROUND << 16));
211 break;
212
213 case SKL_CH_CFG_5_1:
214 config = (0xFF000000 | SKL_CHANNEL_CENTER
215 | (SKL_CHANNEL_LEFT << 4)
216 | (SKL_CHANNEL_RIGHT << 8)
217 | (SKL_CHANNEL_LEFT_SURROUND << 12)
218 | (SKL_CHANNEL_RIGHT_SURROUND << 16)
219 | (SKL_CHANNEL_LFE << 20));
220 break;
221
222 case SKL_CH_CFG_DUAL_MONO:
223 config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
224 | (SKL_CHANNEL_LEFT << 4));
225 break;
226
227 case SKL_CH_CFG_I2S_DUAL_STEREO_0:
228 config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
229 | (SKL_CHANNEL_RIGHT << 4));
230 break;
231
232 case SKL_CH_CFG_I2S_DUAL_STEREO_1:
233 config = (0xFFFF00FF | (SKL_CHANNEL_LEFT << 8)
234 | (SKL_CHANNEL_RIGHT << 12));
235 break;
236
237 default:
238 config = 0xFFFFFFFF;
239 break;
240
241 }
242
243 return config;
244}
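/*
 * Hedged worked example (not part of the original patch): the channel map
 * built above packs one 4-bit channel id per audio slot, lowest nibble
 * first, with 0xF marking an unused slot. Assuming the SKL_CHANNEL_* ids
 * from skl-tplg-interface.h, a stereo configuration therefore looks like:
 *
 *     slot 0 -> SKL_CHANNEL_LEFT, slot 1 -> SKL_CHANNEL_RIGHT,
 *     slots 2..7 -> 0xF (unused)
 *
 * i.e. config = 0xFFFFFF00 | SKL_CHANNEL_LEFT | (SKL_CHANNEL_RIGHT << 4),
 * which is exactly the SKL_CH_CFG_STEREO case above. The exact numeric
 * value depends on the SKL_CHANNEL_* enum values, so it is illustrative only.
 */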
245
246/*
247 * Each module in the DSP expects a base module configuration, which consists
248 * of the PCM format information calculated by the driver and the resource
249 * values read from the widget information passed through the topology binary.
250 * This is sent when we create a module with the INIT_INSTANCE IPC message.
251 */
252static void skl_set_base_module_format(struct skl_sst *ctx,
253 struct skl_module_cfg *mconfig,
254 struct skl_base_cfg *base_cfg)
255{
256 struct skl_module_fmt *format = &mconfig->in_fmt;
257
258 base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
259
260 base_cfg->audio_fmt.s_freq = format->s_freq;
261 base_cfg->audio_fmt.bit_depth = format->bit_depth;
262 base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
263 base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
264
265 dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
266 format->bit_depth, format->valid_bit_depth,
267 format->ch_cfg);
268
269 base_cfg->audio_fmt.channel_map = skl_create_channel_map(
270 base_cfg->audio_fmt.ch_cfg);
271
272 base_cfg->audio_fmt.interleaving = SKL_INTERLEAVING_PER_CHANNEL;
273
274 base_cfg->cps = mconfig->mcps;
275 base_cfg->ibs = mconfig->ibs;
276 base_cfg->obs = mconfig->obs;
277}
278
279/*
280 * Copies copier capabilities into copier module and updates copier module
281 * config size.
282 */
283static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
284 struct skl_cpr_cfg *cpr_mconfig)
285{
286 if (mconfig->formats_config.caps_size == 0)
287 return;
288
289 memcpy(cpr_mconfig->gtw_cfg.config_data,
290 mconfig->formats_config.caps,
291 mconfig->formats_config.caps_size);
292
293 cpr_mconfig->gtw_cfg.config_length =
294 (mconfig->formats_config.caps_size) / 4;
295}
296
297/*
298 * Calculate the gateway settings required for the copier module: the type
299 * of gateway and the index of the gateway to use
300 */
301static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
302 struct skl_module_cfg *mconfig,
303 struct skl_cpr_cfg *cpr_mconfig)
304{
305 union skl_connector_node_id node_id = {0};
306 struct skl_pipe_params *params = mconfig->pipe->p_params;
307
308 switch (mconfig->dev_type) {
309 case SKL_DEVICE_BT:
310 node_id.node.dma_type =
311 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
312 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
313 SKL_DMA_I2S_LINK_INPUT_CLASS;
314 node_id.node.vindex = params->host_dma_id +
315 (mconfig->vbus_id << 3);
316 break;
317
318 case SKL_DEVICE_I2S:
319 node_id.node.dma_type =
320 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
321 SKL_DMA_I2S_LINK_OUTPUT_CLASS :
322 SKL_DMA_I2S_LINK_INPUT_CLASS;
323 node_id.node.vindex = params->host_dma_id +
324 (mconfig->time_slot << 1) +
325 (mconfig->vbus_id << 3);
326 break;
327
328 case SKL_DEVICE_DMIC:
329 node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
330 node_id.node.vindex = mconfig->vbus_id +
331 (mconfig->time_slot);
332 break;
333
334 case SKL_DEVICE_HDALINK:
335 node_id.node.dma_type =
336 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
337 SKL_DMA_HDA_LINK_OUTPUT_CLASS :
338 SKL_DMA_HDA_LINK_INPUT_CLASS;
339 node_id.node.vindex = params->link_dma_id;
340 break;
341
342 default:
343 node_id.node.dma_type =
344 (SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
345 SKL_DMA_HDA_HOST_OUTPUT_CLASS :
346 SKL_DMA_HDA_HOST_INPUT_CLASS;
347 node_id.node.vindex = params->host_dma_id;
348 break;
349 }
350
351 cpr_mconfig->gtw_cfg.node_id = node_id.val;
352
353 if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
354 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
355 else
356 cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
357
358 cpr_mconfig->cpr_feature_mask = 0;
359 cpr_mconfig->gtw_cfg.config_length = 0;
360
361 skl_copy_copier_caps(mconfig, cpr_mconfig);
362}
363
364static void skl_setup_out_format(struct skl_sst *ctx,
365 struct skl_module_cfg *mconfig,
366 struct skl_audio_data_format *out_fmt)
367{
368 struct skl_module_fmt *format = &mconfig->out_fmt;
369
370 out_fmt->number_of_channels = (u8)format->channels;
371 out_fmt->s_freq = format->s_freq;
372 out_fmt->bit_depth = format->bit_depth;
373 out_fmt->valid_bit_depth = format->valid_bit_depth;
374 out_fmt->ch_cfg = format->ch_cfg;
375
376 out_fmt->channel_map = skl_create_channel_map(out_fmt->ch_cfg);
377 out_fmt->interleaving = SKL_INTERLEAVING_PER_CHANNEL;
378
379 dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
380 out_fmt->number_of_channels, format->s_freq, format->bit_depth);
381}
382
383/*
384 * The DSP needs the SRC module for frequency conversion. SRC takes the base
385 * module configuration and the target frequency as an extra parameter passed
386 * as the SRC config
387 */
388static void skl_set_src_format(struct skl_sst *ctx,
389 struct skl_module_cfg *mconfig,
390 struct skl_src_module_cfg *src_mconfig)
391{
392 struct skl_module_fmt *fmt = &mconfig->out_fmt;
393
394 skl_set_base_module_format(ctx, mconfig,
395 (struct skl_base_cfg *)src_mconfig);
396
397 src_mconfig->src_cfg = fmt->s_freq;
398}
399
400/*
401 * The DSP needs the updown mixer module to do channel conversion. The updown
402 * module takes the base module configuration and the channel configuration.
403 * It also takes coefficients; for now the firmware defaults are applied here
404 */
405static void skl_set_updown_mixer_format(struct skl_sst *ctx,
406 struct skl_module_cfg *mconfig,
407 struct skl_up_down_mixer_cfg *mixer_mconfig)
408{
409 struct skl_module_fmt *fmt = &mconfig->out_fmt;
410 int i = 0;
411
412 skl_set_base_module_format(ctx, mconfig,
413 (struct skl_base_cfg *)mixer_mconfig);
414 mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
415
416 /* Select F/W default coefficient */
417 mixer_mconfig->coeff_sel = 0x0;
418
419 /* User coeff, don't care since we are selecting F/W defaults */
420 for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
421 mixer_mconfig->coeff[i] = 0xDEADBEEF;
422}
423
424/*
425 * 'copier' is a DSP-internal module which copies data from host DMA (HDA
426 * host DMA) or a link (HDA link, SSP, PDM)
427 * Here we calculate the copier module parameters, like the PCM format,
428 * output format and gateway settings
429 * copier_module_config is sent as the input buffer with the INIT_INSTANCE IPC msg
430 */
431static void skl_set_copier_format(struct skl_sst *ctx,
432 struct skl_module_cfg *mconfig,
433 struct skl_cpr_cfg *cpr_mconfig)
434{
435 struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
436 struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
437
438 skl_set_base_module_format(ctx, mconfig, base_cfg);
439
440 skl_setup_out_format(ctx, mconfig, out_fmt);
441 skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
442}
443
444static u16 skl_get_module_param_size(struct skl_sst *ctx,
445 struct skl_module_cfg *mconfig)
446{
447 u16 param_size;
448
449 switch (mconfig->m_type) {
450 case SKL_MODULE_TYPE_COPIER:
451 param_size = sizeof(struct skl_cpr_cfg);
452 param_size += mconfig->formats_config.caps_size;
453 return param_size;
454
455 case SKL_MODULE_TYPE_SRCINT:
456 return sizeof(struct skl_src_module_cfg);
457
458 case SKL_MODULE_TYPE_UPDWMIX:
459 return sizeof(struct skl_up_down_mixer_cfg);
460
461 default:
462 /*
463 * return only base cfg when no specific module type is
464 * specified
465 */
466 return sizeof(struct skl_base_cfg);
467 }
468
469 return 0;
470}
471
472/*
473 * The DSP firmware supports various modules like copier, SRC, updown mixer
474 * etc. These modules require various parameters to be calculated and sent to
475 * the DSP for module initialization. By default a generic module needs only
476 * the base module format configuration
477 */
478
479static int skl_set_module_format(struct skl_sst *ctx,
480 struct skl_module_cfg *module_config,
481 u16 *module_config_size,
482 void **param_data)
483{
484 u16 param_size;
485
486 param_size = skl_get_module_param_size(ctx, module_config);
487
488 *param_data = kzalloc(param_size, GFP_KERNEL);
489 if (NULL == *param_data)
490 return -ENOMEM;
491
492 *module_config_size = param_size;
493
494 switch (module_config->m_type) {
495 case SKL_MODULE_TYPE_COPIER:
496 skl_set_copier_format(ctx, module_config, *param_data);
497 break;
498
499 case SKL_MODULE_TYPE_SRCINT:
500 skl_set_src_format(ctx, module_config, *param_data);
501 break;
502
503 case SKL_MODULE_TYPE_UPDWMIX:
504 skl_set_updown_mixer_format(ctx, module_config, *param_data);
505 break;
506
507 default:
508 skl_set_base_module_format(ctx, module_config, *param_data);
509 break;
510
511 }
512
513 dev_dbg(ctx->dev, "Module id=%d config size: %d bytes\n",
514 module_config->id.module_id, param_size);
515 print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
516 *param_data, param_size, false);
517 return 0;
518}
519
520static int skl_get_queue_index(struct skl_module_pin *mpin,
521 struct skl_module_inst_id id, int max)
522{
523 int i;
524
525 for (i = 0; i < max; i++) {
526 if (mpin[i].id.module_id == id.module_id &&
527 mpin[i].id.instance_id == id.instance_id)
528 return i;
529 }
530
531 return -EINVAL;
532}
533
534/*
535 * Allocates a queue for each module.
536 * If the pin is dynamic, the pin_index is allocated from 0 to max_pin.
537 * If it is static, the pin_index is fixed based on the module_id and instance_id
538 */
539static int skl_alloc_queue(struct skl_module_pin *mpin,
540 struct skl_module_inst_id id, int max)
541{
542 int i;
543
544 /*
545 * if the pin is dynamic, find the first free pin;
546 * otherwise find the pin matching the module and instance id, as the
547 * topology will ensure a unique pin is assigned to it, so there is no
548 * need to allocate/free
549 */
550 for (i = 0; i < max; i++) {
551 if (mpin[i].is_dynamic) {
552 if (!mpin[i].in_use) {
553 mpin[i].in_use = true;
554 mpin[i].id.module_id = id.module_id;
555 mpin[i].id.instance_id = id.instance_id;
556 return i;
557 }
558 } else {
559 if (mpin[i].id.module_id == id.module_id &&
560 mpin[i].id.instance_id == id.instance_id)
561 return i;
562 }
563 }
564
565 return -EINVAL;
566}
567
568static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
569{
570 if (mpin[q_index].is_dynamic) {
571 mpin[q_index].in_use = false;
572 mpin[q_index].id.module_id = 0;
573 mpin[q_index].id.instance_id = 0;
574 }
575}
576
577/*
578 * A module needs to be instantiated in the DSP. A module is part of a
579 * collection of modules referred to as a PIPE.
580 * We first calculate the module format based on the module type, and then
581 * invoke the DSP by sending the INIT_INSTANCE IPC using the IPC helper
582 */
583int skl_init_module(struct skl_sst *ctx,
584 struct skl_module_cfg *mconfig, char *param)
585{
586 u16 module_config_size = 0;
587 void *param_data = NULL;
588 int ret;
589 struct skl_ipc_init_instance_msg msg;
590
591 dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
592 mconfig->id.module_id, mconfig->id.instance_id);
593
594 if (mconfig->pipe->state != SKL_PIPE_CREATED) {
595 dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
596 mconfig->pipe->state, mconfig->pipe->ppl_id);
597 return -EIO;
598 }
599
600 ret = skl_set_module_format(ctx, mconfig,
601 &module_config_size, &param_data);
602 if (ret < 0) {
603 dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
604 return ret;
605 }
606
607 msg.module_id = mconfig->id.module_id;
608 msg.instance_id = mconfig->id.instance_id;
609 msg.ppl_instance_id = mconfig->pipe->ppl_id;
610 msg.param_data_size = module_config_size;
611 msg.core_id = mconfig->core_id;
612
613 ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
614 if (ret < 0) {
615 dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
616 kfree(param_data);
617 return ret;
618 }
619 mconfig->m_state = SKL_MODULE_INIT_DONE;
620
621 return ret;
622}
623
624static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
625 *src_module, struct skl_module_cfg *dst_module)
626{
627 dev_dbg(ctx->dev, "%s: src module_id = %d src_instance=%d\n",
628 __func__, src_module->id.module_id, src_module->id.instance_id);
629 dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
630 dst_module->id.module_id, dst_module->id.instance_id);
631
632 dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
633 src_module->m_state, dst_module->m_state);
634}
635
636/*
637 * On module free-up, we need to unbind the module from the modules
638 * it is already bound to.
639 * Find the pins allocated and unbind them using the bind_unbind IPC
640 */
641int skl_unbind_modules(struct skl_sst *ctx,
642 struct skl_module_cfg *src_mcfg,
643 struct skl_module_cfg *dst_mcfg)
644{
645 int ret;
646 struct skl_ipc_bind_unbind_msg msg;
647 struct skl_module_inst_id src_id = src_mcfg->id;
648 struct skl_module_inst_id dst_id = dst_mcfg->id;
649 int in_max = dst_mcfg->max_in_queue;
650 int out_max = src_mcfg->max_out_queue;
651 int src_index, dst_index;
652
653 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
654
655 if (src_mcfg->m_state != SKL_MODULE_BIND_DONE)
656 return 0;
657
658 /*
659 * for an inter-pipe unbind, only send the unbind if both modules
660 * are in the BIND state
661 */
662 if ((src_mcfg->pipe->ppl_id != dst_mcfg->pipe->ppl_id) &&
663 dst_mcfg->m_state != SKL_MODULE_BIND_DONE)
664 return 0;
665 else if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
666 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
667 return 0;
668
669 /* get src queue index */
670 src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
671 if (src_index < 0)
672 return -EINVAL;
673
674 msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
675
676 /* get dst queue index */
677 dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
678 if (dst_index < 0)
679 return -EINVAL;
680
681 msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
682
683 msg.module_id = src_mcfg->id.module_id;
684 msg.instance_id = src_mcfg->id.instance_id;
685 msg.dst_module_id = dst_mcfg->id.module_id;
686 msg.dst_instance_id = dst_mcfg->id.instance_id;
687 msg.bind = false;
688
689 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
690 if (!ret) {
691 src_mcfg->m_state = SKL_MODULE_UNINIT;
692 /* free queue only if unbind is success */
693 skl_free_queue(src_mcfg->m_out_pin, src_index);
694 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
695 }
696
697 return ret;
698}
699
700/*
701 * Once a module is instantiated it needs to be 'bound' to other modules in
702 * the pipeline. For binding we need to find the module pins which are bound
703 * together
704 * This function finds the pins and then sends the bind_unbind IPC message to
705 * the DSP using the IPC helper
706 */
707int skl_bind_modules(struct skl_sst *ctx,
708 struct skl_module_cfg *src_mcfg,
709 struct skl_module_cfg *dst_mcfg)
710{
711 int ret;
712 struct skl_ipc_bind_unbind_msg msg;
713 struct skl_module_inst_id src_id = src_mcfg->id;
714 struct skl_module_inst_id dst_id = dst_mcfg->id;
715 int in_max = dst_mcfg->max_in_queue;
716 int out_max = src_mcfg->max_out_queue;
717 int src_index, dst_index;
718
719 skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
720
721 if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
722 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
723 return 0;
724
725 src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_id, out_max);
726 if (src_index < 0)
727 return -EINVAL;
728
729 msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
730 dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_id, in_max);
731 if (dst_index < 0) {
732 skl_free_queue(src_mcfg->m_out_pin, src_index);
733 return -EINVAL;
734 }
735
736 msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
737
738 dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
739 msg.src_queue, msg.dst_queue);
740
741 msg.module_id = src_mcfg->id.module_id;
742 msg.instance_id = src_mcfg->id.instance_id;
743 msg.dst_module_id = dst_mcfg->id.module_id;
744 msg.dst_instance_id = dst_mcfg->id.instance_id;
745 msg.bind = true;
746
747 ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
748
749 if (!ret) {
750 src_mcfg->m_state = SKL_MODULE_BIND_DONE;
751 } else {
752 /* error case: if the IPC fails, clear the queue index */
753 skl_free_queue(src_mcfg->m_out_pin, src_index);
754 skl_free_queue(dst_mcfg->m_in_pin, dst_index);
755 }
756
757 return ret;
758}
759
760static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
761 enum skl_ipc_pipeline_state state)
762{
763 dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);
764
765 return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
766}
767
768/*
769 * A pipeline is a collection of modules. Before a module is instantiated, a
770 * pipeline needs to be created for it.
771 * This function creates the pipeline by sending the create pipeline IPC
772 * message to the FW
773 */
774int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
775{
776 int ret;
777
778 dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
779
780 ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
781 pipe->pipe_priority, pipe->ppl_id);
782 if (ret < 0) {
783 dev_err(ctx->dev, "Failed to create pipeline\n");
784 return ret;
785 }
786
787 pipe->state = SKL_PIPE_CREATED;
788
789 return 0;
790}
791
792/*
793 * A pipeline needs to be deleted on cleanup. If a pipeline is running,
794 * pause it first and then delete it
795 * The pipe delete is done by sending the delete pipeline IPC. The DSP will
796 * stop the DMA engines and release the resources
797 */
798int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
799{
800 int ret;
801
802 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
803
804 /* If pipe is not started, do not try to stop the pipe in FW. */
805 if (pipe->state > SKL_PIPE_STARTED) {
806 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
807 if (ret < 0) {
808 dev_err(ctx->dev, "Failed to stop pipeline\n");
809 return ret;
810 }
811
812 pipe->state = SKL_PIPE_PAUSED;
813 } else {
814 /* If pipe was not created in FW, do not try to delete it */
815 if (pipe->state < SKL_PIPE_CREATED)
816 return 0;
817
818 ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
819 if (ret < 0)
820 dev_err(ctx->dev, "Failed to delete pipeline\n");
821 }
822
823 return ret;
824}
825
826/*
827 * A pipeline is also a scheduling entity in the DSP, which can be run or
828 * stopped. For processing data the pipe needs to be run by sending the set
829 * pipe state IPC to the DSP
830 */
831int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
832{
833 int ret;
834
835 dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
836
837 /* If pipe was not created in FW, do not try to pause or delete */
838 if (pipe->state < SKL_PIPE_CREATED)
839 return 0;
840
841 /* Pipe has to be paused before it is started */
842 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
843 if (ret < 0) {
844 dev_err(ctx->dev, "Failed to pause pipe\n");
845 return ret;
846 }
847
848 pipe->state = SKL_PIPE_PAUSED;
849
850 ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
851 if (ret < 0) {
852 dev_err(ctx->dev, "Failed to start pipe\n");
853 return ret;
854 }
855
856 pipe->state = SKL_PIPE_STARTED;
857
858 return 0;
859}
860
861/*
862 * Stop the pipeline by sending the set pipe state IPC
863 * The DSP doesn't implement stop, so we always send the pause message
864 */
865int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
866{
867 int ret;
868
869 dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
870
871 /* If pipe was not created in FW, do not try to pause or delete */
872 if (pipe->state < SKL_PIPE_PAUSED)
873 return 0;
874
875 ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
876 if (ret < 0) {
877 dev_dbg(ctx->dev, "Failed to stop pipe\n");
878 return ret;
879 }
880
881 pipe->state = SKL_PIPE_CREATED;
882
883 return 0;
884}
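/*
 * Hedged usage sketch (not part of the original patch): the expected calling
 * order of the helpers above, as a pipeline is brought up for streaming.
 * The skl_sst context, the skl_pipe and the two skl_module_cfg objects are
 * assumed to come from topology parsing; the function name is hypothetical
 * and the error unwinding is deliberately minimal.
 */
static int skl_example_start_pipe(struct skl_sst *ctx, struct skl_pipe *pipe,
		struct skl_module_cfg *src, struct skl_module_cfg *sink)
{
	int ret;

	ret = skl_create_pipeline(ctx, pipe);	/* create pipeline IPC */
	if (ret < 0)
		return ret;

	ret = skl_init_module(ctx, src, NULL);	/* INIT_INSTANCE IPC */
	if (ret < 0)
		goto err;
	ret = skl_init_module(ctx, sink, NULL);
	if (ret < 0)
		goto err;

	ret = skl_bind_modules(ctx, src, sink);	/* bind_unbind IPC */
	if (ret < 0)
		goto err;

	return skl_run_pipe(ctx, pipe);		/* PAUSED then RUNNING */

err:
	skl_delete_pipe(ctx, pipe);
	return ret;
}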
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
new file mode 100644
index 000000000000..13036b19d7e5
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -0,0 +1,140 @@
1/*
2 * skl-nhlt.c - Intel SKL Platform NHLT parsing
3 *
4 * Copyright (C) 2015 Intel Corp
5 * Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 *
19 */
20#include "skl.h"
21
22/* Unique identification for getting NHLT blobs */
23static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
24 0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
25
26#define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
27
28void __iomem *skl_nhlt_init(struct device *dev)
29{
30 acpi_handle handle;
31 union acpi_object *obj;
32 struct nhlt_resource_desc *nhlt_ptr = NULL;
33
34 if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
35 dev_err(dev, "Requested NHLT device not found\n");
36 return NULL;
37 }
38
39 obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
40 if (obj && obj->type == ACPI_TYPE_BUFFER) {
41 nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
42
43 return ioremap_cache(nhlt_ptr->min_addr, nhlt_ptr->length);
44 }
45
46 dev_err(dev, "device specific method to extract NHLT blob failed\n");
47 return NULL;
48}
49
50void skl_nhlt_free(void __iomem *addr)
51{
52 iounmap(addr);
53 addr = NULL;
54}
55
56static struct nhlt_specific_cfg *skl_get_specific_cfg(
57 struct device *dev, struct nhlt_fmt *fmt,
58 u8 no_ch, u32 rate, u16 bps)
59{
60 struct nhlt_specific_cfg *sp_config;
61 struct wav_fmt *wfmt;
62 struct nhlt_fmt_cfg *fmt_config = fmt->fmt_config;
63 int i;
64
65 dev_dbg(dev, "Format count =%d\n", fmt->fmt_count);
66
67 for (i = 0; i < fmt->fmt_count; i++) {
68 wfmt = &fmt_config->fmt_ext.fmt;
69 dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", wfmt->channels,
70 wfmt->bits_per_sample, wfmt->samples_per_sec);
71 if (wfmt->channels == no_ch && wfmt->samples_per_sec == rate &&
72 wfmt->bits_per_sample == bps) {
73 sp_config = &fmt_config->config;
74
75 return sp_config;
76 }
77
78 fmt_config = (struct nhlt_fmt_cfg *)(fmt_config->config.caps +
79 fmt_config->config.size);
80 }
81
82 return NULL;
83}
84
85static void dump_config(struct device *dev, u32 instance_id, u8 linktype,
86 u8 s_fmt, u8 num_channels, u32 s_rate, u8 dirn, u16 bps)
87{
88 dev_dbg(dev, "Input configuration\n");
89 dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", num_channels, s_fmt, s_rate);
90 dev_dbg(dev, "vbus_id=%d link_type=%d\n", instance_id, linktype);
91 dev_dbg(dev, "bits_per_sample=%d\n", bps);
92}
93
94static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
95 u32 instance_id, u8 link_type, u8 dirn)
96{
97 dev_dbg(dev, "vbus_id=%d link_type=%d dir=%d\n",
98 epnt->virtual_bus_id, epnt->linktype, epnt->direction);
99
100 if ((epnt->virtual_bus_id == instance_id) &&
101 (epnt->linktype == link_type) &&
102 (epnt->direction == dirn))
103 return true;
104 else
105 return false;
106}
107
108struct nhlt_specific_cfg
109*skl_get_ep_blob(struct skl *skl, u32 instance, u8 link_type,
110 u8 s_fmt, u8 num_ch, u32 s_rate, u8 dirn)
111{
112 struct nhlt_fmt *fmt;
113 struct nhlt_endpoint *epnt;
114 struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
115 struct device *dev = bus->dev;
116 struct nhlt_specific_cfg *sp_config;
117 struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
118 u16 bps = num_ch * s_fmt;
119 u8 j;
120
121 dump_config(dev, instance, link_type, s_fmt, num_ch, s_rate, dirn, bps);
122
123 epnt = (struct nhlt_endpoint *)nhlt->desc;
124
125 dev_dbg(dev, "endpoint count =%d\n", nhlt->endpoint_count);
126
127 for (j = 0; j < nhlt->endpoint_count; j++) {
128 if (skl_check_ep_match(dev, epnt, instance, link_type, dirn)) {
129 fmt = (struct nhlt_fmt *)(epnt->config.caps +
130 epnt->config.size);
131 sp_config = skl_get_specific_cfg(dev, fmt, num_ch, s_rate, bps);
132 if (sp_config)
133 return sp_config;
134 }
135
136 epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
137 }
138
139 return NULL;
140}
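/*
 * Hedged usage sketch (not part of the original patch): how a caller could
 * look up the endpoint-specific configuration blob for a DMIC capture
 * stream. The instance, format and direction values here are illustrative
 * assumptions; real callers pass values derived from hw_params and the
 * topology data, and the helper name is hypothetical.
 */
static struct nhlt_specific_cfg *skl_example_dmic_blob(struct skl *skl)
{
	/* instance 0, DMIC link, 16-bit samples, stereo, 48 kHz, capture */
	return skl_get_ep_blob(skl, 0, NHLT_LINK_DMIC, 16, 2, 48000,
			       SNDRV_PCM_STREAM_CAPTURE);
}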
diff --git a/sound/soc/intel/skylake/skl-nhlt.h b/sound/soc/intel/skylake/skl-nhlt.h
new file mode 100644
index 000000000000..3769f9fefe2b
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.h
@@ -0,0 +1,106 @@
1/*
2 * skl-nhlt.h - Intel HDA Platform NHLT header
3 *
4 * Copyright (C) 2015 Intel Corp
5 * Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 *
19 */
20#ifndef __SKL_NHLT_H__
21#define __SKL_NHLT_H__
22
23#include <linux/acpi.h>
24
25struct wav_fmt {
26 u16 fmt_tag;
27 u16 channels;
28 u32 samples_per_sec;
29 u32 avg_bytes_per_sec;
30 u16 block_align;
31 u16 bits_per_sample;
32 u16 cb_size;
33} __packed;
34
35struct wav_fmt_ext {
36 struct wav_fmt fmt;
37 union samples {
38 u16 valid_bits_per_sample;
39 u16 samples_per_block;
40 u16 reserved;
41 } sample;
42 u32 channel_mask;
43 u8 sub_fmt[16];
44} __packed;
45
46enum nhlt_link_type {
47 NHLT_LINK_HDA = 0,
48 NHLT_LINK_DSP = 1,
49 NHLT_LINK_DMIC = 2,
50 NHLT_LINK_SSP = 3,
51 NHLT_LINK_INVALID
52};
53
54enum nhlt_device_type {
55 NHLT_DEVICE_BT = 0,
56 NHLT_DEVICE_DMIC = 1,
57 NHLT_DEVICE_I2S = 4,
58 NHLT_DEVICE_INVALID
59};
60
61struct nhlt_specific_cfg {
62 u32 size;
63 u8 caps[0];
64} __packed;
65
66struct nhlt_fmt_cfg {
67 struct wav_fmt_ext fmt_ext;
68 struct nhlt_specific_cfg config;
69} __packed;
70
71struct nhlt_fmt {
72 u8 fmt_count;
73 struct nhlt_fmt_cfg fmt_config[0];
74} __packed;
75
76struct nhlt_endpoint {
77 u32 length;
78 u8 linktype;
79 u8 instance_id;
80 u16 vendor_id;
81 u16 device_id;
82 u16 revision_id;
83 u32 subsystem_id;
84 u8 device_type;
85 u8 direction;
86 u8 virtual_bus_id;
87 struct nhlt_specific_cfg config;
88} __packed;
89
90struct nhlt_acpi_table {
91 struct acpi_table_header header;
92 u8 endpoint_count;
93 struct nhlt_endpoint desc[0];
94} __packed;
95
96struct nhlt_resource_desc {
97 u32 extra;
98 u16 flags;
99 u64 addr_spc_gra;
100 u64 min_addr;
101 u64 max_addr;
102 u64 addr_trans_offset;
103 u64 length;
104} __packed;
105
106#endif
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
new file mode 100644
index 000000000000..7d617bf493bc
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -0,0 +1,916 @@
1/*
2 * skl-pcm.c - ASoC HDA Platform driver file implementing PCM functionality
3 *
4 * Copyright (C) 2014-2015 Intel Corp
5 * Author: Jeeja KP <jeeja.kp@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 *
20 */
21
22#include <linux/pci.h>
23#include <linux/pm_runtime.h>
24#include <sound/pcm_params.h>
25#include <sound/soc.h>
26#include "skl.h"
27
28#define HDA_MONO 1
29#define HDA_STEREO 2
30
31static struct snd_pcm_hardware azx_pcm_hw = {
32 .info = (SNDRV_PCM_INFO_MMAP |
33 SNDRV_PCM_INFO_INTERLEAVED |
34 SNDRV_PCM_INFO_BLOCK_TRANSFER |
35 SNDRV_PCM_INFO_MMAP_VALID |
36 SNDRV_PCM_INFO_PAUSE |
37 SNDRV_PCM_INFO_SYNC_START |
38 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
39 SNDRV_PCM_INFO_HAS_LINK_ATIME |
40 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
41 .formats = SNDRV_PCM_FMTBIT_S16_LE,
42 .rates = SNDRV_PCM_RATE_48000,
43 .rate_min = 48000,
44 .rate_max = 48000,
45 .channels_min = 2,
46 .channels_max = 2,
47 .buffer_bytes_max = AZX_MAX_BUF_SIZE,
48 .period_bytes_min = 128,
49 .period_bytes_max = AZX_MAX_BUF_SIZE / 2,
50 .periods_min = 2,
51 .periods_max = AZX_MAX_FRAG,
52 .fifo_size = 0,
53};
54
55static inline
56struct hdac_ext_stream *get_hdac_ext_stream(struct snd_pcm_substream *substream)
57{
58 return substream->runtime->private_data;
59}
60
61static struct hdac_ext_bus *get_bus_ctx(struct snd_pcm_substream *substream)
62{
63 struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
64 struct hdac_stream *hstream = hdac_stream(stream);
65 struct hdac_bus *bus = hstream->bus;
66
67 return hbus_to_ebus(bus);
68}
69
70static int skl_substream_alloc_pages(struct hdac_ext_bus *ebus,
71 struct snd_pcm_substream *substream,
72 size_t size)
73{
74 struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
75
76 hdac_stream(stream)->bufsize = 0;
77 hdac_stream(stream)->period_bytes = 0;
78 hdac_stream(stream)->format_val = 0;
79
80 return snd_pcm_lib_malloc_pages(substream, size);
81}
82
83static int skl_substream_free_pages(struct hdac_bus *bus,
84 struct snd_pcm_substream *substream)
85{
86 return snd_pcm_lib_free_pages(substream);
87}
88
89static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
90 struct snd_pcm_runtime *runtime)
91{
92 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
93
94 /* avoid wrap-around with wall-clock */
95 snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
96 20, 178000000);
97}
98
99static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
100{
101 if (ebus->ppcap)
102 return HDAC_EXT_STREAM_TYPE_HOST;
103 else
104 return HDAC_EXT_STREAM_TYPE_COUPLED;
105}
106
107static int skl_pcm_open(struct snd_pcm_substream *substream,
108 struct snd_soc_dai *dai)
109{
110 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
111 struct hdac_ext_stream *stream;
112 struct snd_pcm_runtime *runtime = substream->runtime;
113 struct skl_dma_params *dma_params;
114 int ret;
115
116 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
117 ret = pm_runtime_get_sync(dai->dev);
118 if (ret)
119 return ret;
120
121 stream = snd_hdac_ext_stream_assign(ebus, substream,
122 skl_get_host_stream_type(ebus));
123 if (stream == NULL)
124 return -EBUSY;
125
126 skl_set_pcm_constrains(ebus, runtime);
127
128 /*
129 * disable WALLCLOCK timestamps for capture streams
130 * until we figure out how to handle digital inputs
131 */
132 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
133 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
134 runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
135 }
136
137 runtime->private_data = stream;
138
139 dma_params = kzalloc(sizeof(*dma_params), GFP_KERNEL);
140 if (!dma_params)
141 return -ENOMEM;
142
143 dma_params->stream_tag = hdac_stream(stream)->stream_tag;
144 snd_soc_dai_set_dma_data(dai, substream, dma_params);
145
146 dev_dbg(dai->dev, "stream tag set in dma params=%d\n",
147 dma_params->stream_tag);
148 snd_pcm_set_sync(substream);
149
150 return 0;
151}
152
153static int skl_get_format(struct snd_pcm_substream *substream,
154 struct snd_soc_dai *dai)
155{
156 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
157 struct skl_dma_params *dma_params;
158 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
159 int format_val = 0;
160
161 if (ebus->ppcap) {
162 struct snd_pcm_runtime *runtime = substream->runtime;
163
164 format_val = snd_hdac_calc_stream_format(runtime->rate,
165 runtime->channels,
166 runtime->format,
167 32, 0);
168 } else {
169 struct snd_soc_dai *codec_dai = rtd->codec_dai;
170
171 dma_params = snd_soc_dai_get_dma_data(codec_dai, substream);
172 if (dma_params)
173 format_val = dma_params->format;
174 }
175
176 return format_val;
177}
178
179static int skl_pcm_prepare(struct snd_pcm_substream *substream,
180 struct snd_soc_dai *dai)
181{
182 struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
183 unsigned int format_val;
184 int err;
185
186 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
187 if (hdac_stream(stream)->prepared) {
188 dev_dbg(dai->dev, "stream is already prepared - returning\n");
189 return 0;
190 }
191
192 format_val = skl_get_format(substream, dai);
193 dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
194 hdac_stream(stream)->stream_tag, format_val);
195 snd_hdac_stream_reset(hdac_stream(stream));
196
197 err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
198 if (err < 0)
199 return err;
200
201 err = snd_hdac_stream_setup(hdac_stream(stream));
202 if (err < 0)
203 return err;
204
205 hdac_stream(stream)->prepared = 1;
206
207 return err;
208}
209
210static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
211 struct snd_pcm_hw_params *params,
212 struct snd_soc_dai *dai)
213{
214 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
215 struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
216 struct snd_pcm_runtime *runtime = substream->runtime;
217 int ret, dma_id;
218
219 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
220 ret = skl_substream_alloc_pages(ebus, substream,
221 params_buffer_bytes(params));
222 if (ret < 0)
223 return ret;
224
225 dev_dbg(dai->dev, "hw_params: rate=%d, ch=%d, format=%d\n",
226 runtime->rate, runtime->channels, runtime->format);
227
228 dma_id = hdac_stream(stream)->stream_tag - 1;
229 dev_dbg(dai->dev, "dma_id=%d\n", dma_id);
230
231 return 0;
232}
233
234static void skl_pcm_close(struct snd_pcm_substream *substream,
235 struct snd_soc_dai *dai)
236{
237 struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
238 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
239 struct skl_dma_params *dma_params = NULL;
240
241 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
242
243 snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(ebus));
244
245 dma_params = snd_soc_dai_get_dma_data(dai, substream);
246 /*
247 * now we should set this to NULL as we are freeing
248 * the dma_params
249 */
250 snd_soc_dai_set_dma_data(dai, substream, NULL);
251
252 pm_runtime_mark_last_busy(dai->dev);
253 pm_runtime_put_autosuspend(dai->dev);
254 kfree(dma_params);
255}
256
257static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
258 struct snd_soc_dai *dai)
259{
260 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
261 struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
262
263 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
264
265 snd_hdac_stream_cleanup(hdac_stream(stream));
266 hdac_stream(stream)->prepared = 0;
267
268 return skl_substream_free_pages(ebus_to_hbus(ebus), substream);
269}
270
271static int skl_link_hw_params(struct snd_pcm_substream *substream,
272 struct snd_pcm_hw_params *params,
273 struct snd_soc_dai *dai)
274{
275 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
276 struct hdac_ext_stream *link_dev;
277 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
278 struct skl_dma_params *dma_params;
279 struct snd_soc_dai *codec_dai = rtd->codec_dai;
280 int dma_id;
281
282 pr_debug("%s\n", __func__);
283 link_dev = snd_hdac_ext_stream_assign(ebus, substream,
284 HDAC_EXT_STREAM_TYPE_LINK);
285 if (!link_dev)
286 return -EBUSY;
287
288 snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
289
290 /* set the stream tag in the codec dai dma params */
291 dma_params = (struct skl_dma_params *)
292 snd_soc_dai_get_dma_data(codec_dai, substream);
293 if (dma_params)
294 dma_params->stream_tag = hdac_stream(link_dev)->stream_tag;
295 snd_soc_dai_set_dma_data(codec_dai, substream, (void *)dma_params);
296 dma_id = hdac_stream(link_dev)->stream_tag - 1;
297
298 return 0;
299}
300
301static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
302 struct snd_soc_dai *dai)
303{
304 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
305 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
306 struct hdac_ext_stream *link_dev =
307 snd_soc_dai_get_dma_data(dai, substream);
308 unsigned int format_val = 0;
309 struct skl_dma_params *dma_params;
310 struct snd_soc_dai *codec_dai = rtd->codec_dai;
311 struct snd_pcm_hw_params *params;
312 struct snd_interval *channels, *rate;
313 struct hdac_ext_link *link;
314
315 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
316 if (link_dev->link_prepared) {
317 dev_dbg(dai->dev, "already stream is prepared - returning\n");
318 return 0;
319 }
320 params = devm_kzalloc(dai->dev, sizeof(*params), GFP_KERNEL);
321 if (params == NULL)
322 return -ENOMEM;
323
324 channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
325 channels->min = channels->max = substream->runtime->channels;
326 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
327 rate->min = rate->max = substream->runtime->rate;
328 snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
329 SNDRV_PCM_HW_PARAM_FIRST_MASK],
330 substream->runtime->format);
331
332
333 dma_params = (struct skl_dma_params *)
334 snd_soc_dai_get_dma_data(codec_dai, substream);
335 if (dma_params)
336 format_val = dma_params->format;
337 dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d codec_dai_name=%s\n",
338 hdac_stream(link_dev)->stream_tag, format_val, codec_dai->name);
339
340 snd_hdac_ext_link_stream_reset(link_dev);
341
342 snd_hdac_ext_link_stream_setup(link_dev, format_val);
343
344 link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
345 if (!link)
346 return -EINVAL;
347
348 snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
349 link_dev->link_prepared = 1;
350
351 return 0;
352}
353
354static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
355 int cmd, struct snd_soc_dai *dai)
356{
357 struct hdac_ext_stream *link_dev =
358 snd_soc_dai_get_dma_data(dai, substream);
359
360 dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
361 switch (cmd) {
362 case SNDRV_PCM_TRIGGER_START:
363 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
364 case SNDRV_PCM_TRIGGER_RESUME:
365 snd_hdac_ext_link_stream_start(link_dev);
366 break;
367
368 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
369 case SNDRV_PCM_TRIGGER_SUSPEND:
370 case SNDRV_PCM_TRIGGER_STOP:
371 snd_hdac_ext_link_stream_clear(link_dev);
372 break;
373
374 default:
375 return -EINVAL;
376 }
377 return 0;
378}
379
380static int skl_link_hw_free(struct snd_pcm_substream *substream,
381 struct snd_soc_dai *dai)
382{
383 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
384 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
385 struct hdac_ext_stream *link_dev =
386 snd_soc_dai_get_dma_data(dai, substream);
387 struct hdac_ext_link *link;
388
389 dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
390
391 link_dev->link_prepared = 0;
392
393 link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
394 if (!link)
395 return -EINVAL;
396
397 snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
398 snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
399 return 0;
400}
401
402static int skl_hda_be_startup(struct snd_pcm_substream *substream,
403 struct snd_soc_dai *dai)
404{
405 return pm_runtime_get_sync(dai->dev);
406}
407
408static void skl_hda_be_shutdown(struct snd_pcm_substream *substream,
409 struct snd_soc_dai *dai)
410{
411 pm_runtime_mark_last_busy(dai->dev);
412 pm_runtime_put_autosuspend(dai->dev);
413}
414
415static struct snd_soc_dai_ops skl_pcm_dai_ops = {
416 .startup = skl_pcm_open,
417 .shutdown = skl_pcm_close,
418 .prepare = skl_pcm_prepare,
419 .hw_params = skl_pcm_hw_params,
420 .hw_free = skl_pcm_hw_free,
421};
422
423static struct snd_soc_dai_ops skl_dmic_dai_ops = {
424 .startup = skl_hda_be_startup,
425 .shutdown = skl_hda_be_shutdown,
426};
427
428static struct snd_soc_dai_ops skl_link_dai_ops = {
429 .startup = skl_hda_be_startup,
430 .prepare = skl_link_pcm_prepare,
431 .hw_params = skl_link_hw_params,
432 .hw_free = skl_link_hw_free,
433 .trigger = skl_link_pcm_trigger,
434 .shutdown = skl_hda_be_shutdown,
435};
436
437static struct snd_soc_dai_driver skl_platform_dai[] = {
438{
439 .name = "System Pin",
440 .ops = &skl_pcm_dai_ops,
441 .playback = {
442 .stream_name = "System Playback",
443 .channels_min = HDA_MONO,
444 .channels_max = HDA_STEREO,
445 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
446 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
447 },
448 .capture = {
449 .stream_name = "System Capture",
450 .channels_min = HDA_MONO,
451 .channels_max = HDA_STEREO,
452 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
453 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
454 },
455},
456{
457 .name = "Reference Pin",
458 .ops = &skl_pcm_dai_ops,
459 .capture = {
460 .stream_name = "Reference Capture",
461 .channels_min = HDA_MONO,
462 .channels_max = HDA_STEREO,
463 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
464 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
465 },
466},
467{
468 .name = "Deepbuffer Pin",
469 .ops = &skl_pcm_dai_ops,
470 .playback = {
471 .stream_name = "Deepbuffer Playback",
472 .channels_min = HDA_STEREO,
473 .channels_max = HDA_STEREO,
474 .rates = SNDRV_PCM_RATE_48000,
475 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
476 },
477},
478{
479 .name = "LowLatency Pin",
480 .ops = &skl_pcm_dai_ops,
481 .playback = {
482 .stream_name = "Low Latency Playback",
483 .channels_min = HDA_STEREO,
484 .channels_max = HDA_STEREO,
485 .rates = SNDRV_PCM_RATE_48000,
486 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
487 },
488},
489/* BE CPU Dais */
490{
491 .name = "iDisp Pin",
492 .ops = &skl_link_dai_ops,
493 .playback = {
494 .stream_name = "iDisp Tx",
495 .channels_min = HDA_STEREO,
496 .channels_max = HDA_STEREO,
497 .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
498 .formats = SNDRV_PCM_FMTBIT_S16_LE,
499 },
500},
501{
502 .name = "DMIC01 Pin",
503 .ops = &skl_dmic_dai_ops,
504 .capture = {
505 .stream_name = "DMIC01 Rx",
506 .channels_min = HDA_STEREO,
507 .channels_max = HDA_STEREO,
508 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
509 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
510 },
511},
512{
513 .name = "DMIC23 Pin",
514 .ops = &skl_dmic_dai_ops,
515 .capture = {
516 .stream_name = "DMIC23 Rx",
517 .channels_min = HDA_STEREO,
518 .channels_max = HDA_STEREO,
519 .rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
520 .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
521 },
522},
523{
524 .name = "HD-Codec Pin",
525 .ops = &skl_link_dai_ops,
526 .playback = {
527 .stream_name = "HD-Codec Tx",
528 .channels_min = HDA_STEREO,
529 .channels_max = HDA_STEREO,
530 .rates = SNDRV_PCM_RATE_48000,
531 .formats = SNDRV_PCM_FMTBIT_S16_LE,
532 },
533 .capture = {
534 .stream_name = "HD-Codec Rx",
535 .channels_min = HDA_STEREO,
536 .channels_max = HDA_STEREO,
537 .rates = SNDRV_PCM_RATE_48000,
538 .formats = SNDRV_PCM_FMTBIT_S16_LE,
539 },
540},
541{
542 .name = "HD-Codec-SPK Pin",
543 .ops = &skl_link_dai_ops,
544 .playback = {
545 .stream_name = "HD-Codec-SPK Tx",
546 .channels_min = HDA_STEREO,
547 .channels_max = HDA_STEREO,
548 .rates = SNDRV_PCM_RATE_48000,
549 .formats = SNDRV_PCM_FMTBIT_S16_LE,
550 },
551},
552{
553 .name = "HD-Codec-AMIC Pin",
554 .ops = &skl_link_dai_ops,
555 .capture = {
556 .stream_name = "HD-Codec-AMIC Rx",
557 .channels_min = HDA_STEREO,
558 .channels_max = HDA_STEREO,
559 .rates = SNDRV_PCM_RATE_48000,
560 .formats = SNDRV_PCM_FMTBIT_S16_LE,
561 },
562},
563};
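/*
 * Hedged machine-driver sketch (not part of the original patch): a front-end
 * snd_soc_dai_link in a machine driver would reference the CPU DAI names
 * registered above, e.g. the "System Pin" DAI. The link, platform and codec
 * names below are placeholders/assumptions, not values defined by this patch.
 */
static struct snd_soc_dai_link skl_example_dai_link = {
	.name = "Skl Audio Port",		/* placeholder link name */
	.cpu_dai_name = "System Pin",		/* matches skl_platform_dai[] */
	.platform_name = "0000:00:1f.3",	/* assumed PCI name of the audio device */
	.codec_name = "snd-soc-dummy",		/* placeholder codec */
	.codec_dai_name = "snd-soc-dummy-dai",
	.dynamic = 1,				/* front-end (DPCM) link */
};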
564
565static int skl_platform_open(struct snd_pcm_substream *substream)
566{
567 struct snd_pcm_runtime *runtime;
568 struct snd_soc_pcm_runtime *rtd = substream->private_data;
569 struct snd_soc_dai_link *dai_link = rtd->dai_link;
570
571 dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
572 dai_link->cpu_dai_name);
573
574 runtime = substream->runtime;
575 snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
576
577 return 0;
578}
579
580static int skl_pcm_trigger(struct snd_pcm_substream *substream,
581 int cmd)
582{
583 struct hdac_ext_bus *ebus = get_bus_ctx(substream);
584 struct hdac_bus *bus = ebus_to_hbus(ebus);
585 struct hdac_ext_stream *stream;
586 struct snd_pcm_substream *s;
587 bool start;
588 int sbits = 0;
589 unsigned long cookie;
590 struct hdac_stream *hstr;
591
592 stream = get_hdac_ext_stream(substream);
593 hstr = hdac_stream(stream);
594
595 dev_dbg(bus->dev, "In %s cmd=%d\n", __func__, cmd);
596
597 if (!hstr->prepared)
598 return -EPIPE;
599
600 switch (cmd) {
601 case SNDRV_PCM_TRIGGER_START:
602 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
603 case SNDRV_PCM_TRIGGER_RESUME:
604 start = true;
605 break;
606
607 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
608 case SNDRV_PCM_TRIGGER_SUSPEND:
609 case SNDRV_PCM_TRIGGER_STOP:
610 start = false;
611 break;
612
613 default:
614 return -EINVAL;
615 }
616
617 snd_pcm_group_for_each_entry(s, substream) {
618 if (s->pcm->card != substream->pcm->card)
619 continue;
620 stream = get_hdac_ext_stream(s);
621 sbits |= 1 << hdac_stream(stream)->index;
622 snd_pcm_trigger_done(s, substream);
623 }
624
625 spin_lock_irqsave(&bus->reg_lock, cookie);
626
627 /* first, set SYNC bits of corresponding streams */
628 snd_hdac_stream_sync_trigger(hstr, true, sbits, AZX_REG_SSYNC);
629
630 snd_pcm_group_for_each_entry(s, substream) {
631 if (s->pcm->card != substream->pcm->card)
632 continue;
633 stream = get_hdac_ext_stream(s);
634 if (start)
635 snd_hdac_stream_start(hdac_stream(stream), true);
636 else
637 snd_hdac_stream_stop(hdac_stream(stream));
638 }
639 spin_unlock_irqrestore(&bus->reg_lock, cookie);
640
641 snd_hdac_stream_sync(hstr, start, sbits);
642
643 spin_lock_irqsave(&bus->reg_lock, cookie);
644
645 /* reset SYNC bits */
646 snd_hdac_stream_sync_trigger(hstr, false, sbits, AZX_REG_SSYNC);
647 if (start)
648 snd_hdac_stream_timecounter_init(hstr, sbits);
649 spin_unlock_irqrestore(&bus->reg_lock, cookie);
650
651 return 0;
652}
653
654static int skl_dsp_trigger(struct snd_pcm_substream *substream,
655 int cmd)
656{
657 struct hdac_ext_bus *ebus = get_bus_ctx(substream);
658 struct hdac_bus *bus = ebus_to_hbus(ebus);
659 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
660 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
661 struct hdac_ext_stream *stream;
662 int start;
663 unsigned long cookie;
664 struct hdac_stream *hstr;
665
666 dev_dbg(bus->dev, "In %s cmd=%d streamname=%s\n", __func__, cmd, cpu_dai->name);
667
668 stream = get_hdac_ext_stream(substream);
669 hstr = hdac_stream(stream);
670
671 if (!hstr->prepared)
672 return -EPIPE;
673
674 switch (cmd) {
675 case SNDRV_PCM_TRIGGER_START:
676 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
677 case SNDRV_PCM_TRIGGER_RESUME:
678 start = 1;
679 break;
680
681 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
682 case SNDRV_PCM_TRIGGER_SUSPEND:
683 case SNDRV_PCM_TRIGGER_STOP:
684 start = 0;
685 break;
686
687 default:
688 return -EINVAL;
689 }
690
691 spin_lock_irqsave(&bus->reg_lock, cookie);
692
693 if (start)
694 snd_hdac_stream_start(hdac_stream(stream), true);
695 else
696 snd_hdac_stream_stop(hdac_stream(stream));
697
698 if (start)
699 snd_hdac_stream_timecounter_init(hstr, 0);
700
701 spin_unlock_irqrestore(&bus->reg_lock, cookie);
702
703 return 0;
704}
705static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
706 int cmd)
707{
708 struct hdac_ext_bus *ebus = get_bus_ctx(substream);
709
710 if (ebus->ppcap)
711 return skl_dsp_trigger(substream, cmd);
712 else
713 return skl_pcm_trigger(substream, cmd);
714}
715
716/* calculate runtime delay from LPIB */
717static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
718 struct hdac_ext_stream *sstream,
719 unsigned int pos)
720{
721 struct hdac_bus *bus = ebus_to_hbus(ebus);
722 struct hdac_stream *hstream = hdac_stream(sstream);
723 struct snd_pcm_substream *substream = hstream->substream;
724 int stream = substream->stream;
725 unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
726 int delay;
727
728 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
729 delay = pos - lpib_pos;
730 else
731 delay = lpib_pos - pos;
732
733 if (delay < 0) {
734 if (delay >= hstream->delay_negative_threshold)
735 delay = 0;
736 else
737 delay += hstream->bufsize;
738 }
739
740 if (delay >= hstream->period_bytes) {
741 dev_info(bus->dev,
742 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
743 delay, hstream->period_bytes);
744 delay = 0;
745 }
746
747 return bytes_to_frames(substream->runtime, delay);
748}
749
750static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
751 int codec_delay)
752{
753 struct hdac_stream *hstr = hdac_stream(hstream);
754 struct snd_pcm_substream *substream = hstr->substream;
755 struct hdac_ext_bus *ebus = get_bus_ctx(substream);
756 unsigned int pos;
757 int delay;
758
759 /* use the position buffer as default */
760 pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
761
762 if (pos >= hdac_stream(hstream)->bufsize)
763 pos = 0;
764
765 if (substream->runtime) {
766 delay = skl_get_delay_from_lpib(ebus, hstream, pos)
767 + codec_delay;
768 substream->runtime->delay += delay;
769 }
770
771 return pos;
772}
773
774static snd_pcm_uframes_t skl_platform_pcm_pointer
775 (struct snd_pcm_substream *substream)
776{
777 struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
778
779 return bytes_to_frames(substream->runtime,
780 skl_get_position(hstream, 0));
781}
782
783static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
784 u64 nsec)
785{
786 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
787 struct snd_soc_dai *codec_dai = rtd->codec_dai;
788 u64 codec_frames, codec_nsecs;
789
790 if (!codec_dai->driver->ops->delay)
791 return nsec;
792
793 codec_frames = codec_dai->driver->ops->delay(substream, codec_dai);
794 codec_nsecs = div_u64(codec_frames * 1000000000LL,
795 substream->runtime->rate);
796
797 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
798 return nsec + codec_nsecs;
799
800 return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
801}
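/*
 * Hedged worked example (not part of the original patch): with a codec
 * reporting 48 frames of delay on a 48 kHz stream,
 *
 *     codec_nsecs = 48 * 1000000000 / 48000 = 1,000,000 ns (1 ms),
 *
 * so a playback link timestamp is moved 1 ms earlier (nsec - codec_nsecs)
 * while a capture timestamp is moved 1 ms later (nsec + codec_nsecs).
 */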
802
803static int skl_get_time_info(struct snd_pcm_substream *substream,
804 struct timespec *system_ts, struct timespec *audio_ts,
805 struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
806 struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
807{
808 struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream);
809 struct hdac_stream *hstr = hdac_stream(sstream);
810 u64 nsec;
811
812 if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
813 (audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
814
815 snd_pcm_gettime(substream->runtime, system_ts);
816
817 nsec = timecounter_read(&hstr->tc);
818 nsec = div_u64(nsec, 3); /* can be optimized */
819 if (audio_tstamp_config->report_delay)
820 nsec = skl_adjust_codec_delay(substream, nsec);
821
822 *audio_ts = ns_to_timespec(nsec);
823
824 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
825 audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */
826 audio_tstamp_report->accuracy = 42; /* 24MHzWallClk == 42ns resolution */
827
828 } else {
829 audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
830 }
831
832 return 0;
833}
834
835static struct snd_pcm_ops skl_platform_ops = {
836 .open = skl_platform_open,
837 .ioctl = snd_pcm_lib_ioctl,
838 .trigger = skl_platform_pcm_trigger,
839 .pointer = skl_platform_pcm_pointer,
840 .get_time_info = skl_get_time_info,
841 .mmap = snd_pcm_lib_default_mmap,
842 .page = snd_pcm_sgbuf_ops_page,
843};
844
845static void skl_pcm_free(struct snd_pcm *pcm)
846{
847 snd_pcm_lib_preallocate_free_for_all(pcm);
848}
849
850#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
851
852static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
853{
854 struct snd_soc_dai *dai = rtd->cpu_dai;
855 struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
856 struct snd_pcm *pcm = rtd->pcm;
857 unsigned int size;
858 int retval = 0;
859 struct skl *skl = ebus_to_skl(ebus);
860
861 if (dai->driver->playback.channels_min ||
862 dai->driver->capture.channels_min) {
863 /* buffer pre-allocation */
864 size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
865 if (size > MAX_PREALLOC_SIZE)
866 size = MAX_PREALLOC_SIZE;
867 retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
868 SNDRV_DMA_TYPE_DEV_SG,
869 snd_dma_pci_data(skl->pci),
870 size, MAX_PREALLOC_SIZE);
871 if (retval) {
872 dev_err(dai->dev, "dma buffer allocation failed\n");
873 return retval;
874 }
875 }
876
877 return retval;
878}
879
880static struct snd_soc_platform_driver skl_platform_drv = {
881 .ops = &skl_platform_ops,
882 .pcm_new = skl_pcm_new,
883 .pcm_free = skl_pcm_free,
884};
885
886static const struct snd_soc_component_driver skl_component = {
887 .name = "pcm",
888};
889
890int skl_platform_register(struct device *dev)
891{
892 int ret;
893
894 ret = snd_soc_register_platform(dev, &skl_platform_drv);
895 if (ret) {
896 dev_err(dev, "soc platform registration failed %d\n", ret);
897 return ret;
898 }
899 ret = snd_soc_register_component(dev, &skl_component,
900 skl_platform_dai,
901 ARRAY_SIZE(skl_platform_dai));
902 if (ret) {
903 dev_err(dev, "soc component registration failed %d\n", ret);
904 snd_soc_unregister_platform(dev);
905 }
906
907 return ret;
908
909}
910
911int skl_platform_unregister(struct device *dev)
912{
913 snd_soc_unregister_component(dev);
914 snd_soc_unregister_platform(dev);
915 return 0;
916}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
new file mode 100644
index 000000000000..44748ba98da2
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -0,0 +1,327 @@
1/*
2 * skl-sst-cldma.c - Code Loader DMA handler
3 *
4 * Copyright (C) 2015, Intel Corporation.
5 * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as version 2, as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/device.h>
19#include <linux/mm.h>
20#include <linux/kthread.h>
21#include "../common/sst-dsp.h"
22#include "../common/sst-dsp-priv.h"
23
24static void skl_cldma_int_enable(struct sst_dsp *ctx)
25{
26 sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
27 SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
28}
29
30void skl_cldma_int_disable(struct sst_dsp *ctx)
31{
32 sst_dsp_shim_update_bits_unlocked(ctx,
33 SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
34}
35
36/* Code loader helper APIs */
37static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
38 struct snd_dma_buffer *dmab_data,
39 u32 **bdlp, int size, int with_ioc)
40{
41 u32 *bdl = *bdlp;
42
43 ctx->cl_dev.frags = 0;
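	/*
	 * Each BDL entry is four 32-bit words: the lower and upper halves
	 * of the fragment address, the fragment size, and a flags word
	 * whose bit 0 is the interrupt-on-completion flag, set only on the
	 * last fragment when with_ioc is requested.
	 */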
44 while (size > 0) {
45 phys_addr_t addr = virt_to_phys(dmab_data->area +
46 (ctx->cl_dev.frags * ctx->cl_dev.bufsize));
47
48 bdl[0] = cpu_to_le32(lower_32_bits(addr));
49 bdl[1] = cpu_to_le32(upper_32_bits(addr));
50
51 bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
52
53 size -= ctx->cl_dev.bufsize;
54 bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
55
56 bdl += 4;
57 ctx->cl_dev.frags++;
58 }
59}
60
61/*
 62 * Set up the code loader DMA controller: program the BDL address
 63 * registers, the cyclic buffer length and last valid index, and
 64 * enable the completion and error interrupts.
 65 * Note: stream number FW_CL_STREAM_NUMBER (1) is used for the transfer.
66 */
67static void skl_cldma_setup_controller(struct sst_dsp *ctx,
68 struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
69 u32 count)
70{
71 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
72 CL_SD_BDLPLBA(dmab_bdl->addr));
73 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
74 CL_SD_BDLPUBA(dmab_bdl->addr));
75
76 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
77 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
78 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
79 CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
80 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
81 CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
82 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
83 CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
84 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
85 CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
86}
87
88static void skl_cldma_setup_spb(struct sst_dsp *ctx,
89 unsigned int size, bool enable)
90{
91 if (enable)
92 sst_dsp_shim_update_bits_unlocked(ctx,
93 SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
94 CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
95 CL_SPBFIFO_SPBFCCTL_SPIBE(1));
96
97 sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
98}
99
100static void skl_cldma_cleanup_spb(struct sst_dsp *ctx)
101{
102 sst_dsp_shim_update_bits_unlocked(ctx,
103 SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
104 CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
105 CL_SPBFIFO_SPBFCCTL_SPIBE(0));
106
107 sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
108}
109
110static void skl_cldma_trigger(struct sst_dsp *ctx, bool enable)
111{
112 if (enable)
113 sst_dsp_shim_update_bits_unlocked(ctx,
114 SKL_ADSP_REG_CL_SD_CTL,
115 CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(1));
116 else
117 sst_dsp_shim_update_bits_unlocked(ctx,
118 SKL_ADSP_REG_CL_SD_CTL,
119 CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(0));
120}
121
122static void skl_cldma_cleanup(struct sst_dsp *ctx)
123{
124 skl_cldma_cleanup_spb(ctx);
125
126 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
127 CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
128 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
129 CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
130 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
131 CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
132 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
133 CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));
134
135 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
136 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);
137
138 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
139 sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
140}
141
142static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
143{
144 int ret = 0;
145
146 if (!wait_event_timeout(ctx->cl_dev.wait_queue,
147 ctx->cl_dev.wait_condition,
148 msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
149 dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
150 ret = -EIO;
151 goto cleanup;
152 }
153
154 dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
155 if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
156 dev_err(ctx->dev, "%s: DMA Error\n", __func__);
157 ret = -EIO;
158 }
159
160cleanup:
161 ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
162 return ret;
163}
164
165static void skl_cldma_stop(struct sst_dsp *ctx)
166{
167 ctx->cl_dev.ops.cl_trigger(ctx, false);
168}
169
170static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
171 const void *curr_pos, bool intr_enable, bool trigger)
172{
173 dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
174 dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
175 ctx->cl_dev.dma_buffer_offset, trigger);
176 dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
177
178 memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
179 curr_pos, size);
180
181 if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
182 ctx->cl_dev.dma_buffer_offset = 0;
183 else
184 ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;
185
186 ctx->cl_dev.wait_condition = false;
187
188 if (intr_enable)
189 skl_cldma_int_enable(ctx);
190
191 ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
192 if (trigger)
193 ctx->cl_dev.ops.cl_trigger(ctx, true);
194}
195
196/*
197 * The CL DMA has no way to report transfer status until a BDL
198 * buffer has been transferred completely.
199 *
200 * So copying is split into two parts:
201 * 1. Interrupt on buffer completion, used while the data left to
202 *    transfer is larger than the ring buffer.
203 * 2. Polling of a firmware register once the data left to transfer no
204 *    longer fills the ring buffer. The caller polls the required status
205 *    register to determine when the transfer has completed.
206 */
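/*
 * Illustrative example (assuming a 4 KiB PAGE_SIZE, so a 128 KiB ring
 * buffer): a 300 KiB firmware image is copied as two interrupt-driven
 * 128 KiB chunks followed by a 44 KiB tail that the caller polls for.
 */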
207static int
208skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
209{
210 int ret = 0;
211 bool start = true;
212 unsigned int excess_bytes;
213 u32 size;
214 unsigned int bytes_left = total_size;
215 const void *curr_pos = bin;
216
217 if (total_size <= 0)
218 return -EINVAL;
219
220 dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);
221
222 while (bytes_left) {
223 if (bytes_left > ctx->cl_dev.bufsize) {
224
225 /*
226			 * The DMA transfers only up to the write pointer as
227			 * programmed in the SPIB register.
228 */
229 if (ctx->cl_dev.curr_spib_pos == 0)
230 ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;
231
232 size = ctx->cl_dev.bufsize;
233 skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
234
235 start = false;
236 ret = skl_cldma_wait_interruptible(ctx);
237 if (ret < 0) {
238 skl_cldma_stop(ctx);
239 return ret;
240 }
241
242 } else {
243 skl_cldma_int_disable(ctx);
244
245 if ((ctx->cl_dev.curr_spib_pos + bytes_left)
246 <= ctx->cl_dev.bufsize) {
247 ctx->cl_dev.curr_spib_pos += bytes_left;
248 } else {
249 excess_bytes = bytes_left -
250 (ctx->cl_dev.bufsize -
251 ctx->cl_dev.curr_spib_pos);
252 ctx->cl_dev.curr_spib_pos = excess_bytes;
253 }
254
255 size = bytes_left;
256 skl_cldma_fill_buffer(ctx, size,
257 curr_pos, false, start);
258 }
259 bytes_left -= size;
260 curr_pos = curr_pos + size;
261 }
262
263 return ret;
264}
265
266void skl_cldma_process_intr(struct sst_dsp *ctx)
267{
268 u8 cl_dma_intr_status;
269
270 cl_dma_intr_status =
271 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);
272
273 if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
274 ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
275 else
276 ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;
277
278 ctx->cl_dev.wait_condition = true;
279 wake_up(&ctx->cl_dev.wait_queue);
280}
281
282int skl_cldma_prepare(struct sst_dsp *ctx)
283{
284 int ret;
285 u32 *bdl;
286
287 ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
288
289 /* Allocate cl ops */
290 ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
291 ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
292 ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
293 ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
294 ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
295 ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
296 ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
297 ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
298
299	/* Allocate ring buffer for firmware download */
300 ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
301 &ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
302 if (ret < 0) {
303		dev_err(ctx->dev, "Alloc buffer for base fw failed: %d\n", ret);
304 return ret;
305 }
306 /* Setup Code loader BDL */
307 ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
308 &ctx->cl_dev.dmab_bdl, PAGE_SIZE);
309 if (ret < 0) {
310		dev_err(ctx->dev, "Alloc buffer for BDL failed: %d\n", ret);
311 ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
312 return ret;
313 }
314 bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;
315
316 /* Allocate BDLs */
317 ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
318 &bdl, ctx->cl_dev.bufsize, 1);
319 ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
320 ctx->cl_dev.bufsize, ctx->cl_dev.frags);
321
322 ctx->cl_dev.curr_spib_pos = 0;
323 ctx->cl_dev.dma_buffer_offset = 0;
324 init_waitqueue_head(&ctx->cl_dev.wait_queue);
325
326 return ret;
327}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.h b/sound/soc/intel/skylake/skl-sst-cldma.h
new file mode 100644
index 000000000000..99e4c86b6358
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.h
@@ -0,0 +1,251 @@
1/*
2 * Intel Code Loader DMA support
3 *
4 * Copyright (C) 2015, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#ifndef SKL_SST_CLDMA_H_
17#define SKL_SST_CLDMA_H_
18
19#define FW_CL_STREAM_NUMBER 0x1
20
21#define DMA_ADDRESS_128_BITS_ALIGNMENT 7
22#define BDL_ALIGN(x) (x >> DMA_ADDRESS_128_BITS_ALIGNMENT)
23
24#define SKL_ADSPIC_CL_DMA 0x2
25#define SKL_ADSPIS_CL_DMA 0x2
26#define SKL_CL_DMA_SD_INT_DESC_ERR 0x10 /* Descriptor error interrupt */
27#define SKL_CL_DMA_SD_INT_FIFO_ERR 0x08 /* FIFO error interrupt */
28#define SKL_CL_DMA_SD_INT_COMPLETE 0x04 /* Buffer completion interrupt */
29
30/* Intel HD Audio Code Loader DMA Registers */
31
32#define HDA_ADSP_LOADER_BASE 0x80
33
34/* Stream Registers */
35#define SKL_ADSP_REG_CL_SD_CTL (HDA_ADSP_LOADER_BASE + 0x00)
36#define SKL_ADSP_REG_CL_SD_STS (HDA_ADSP_LOADER_BASE + 0x03)
37#define SKL_ADSP_REG_CL_SD_LPIB (HDA_ADSP_LOADER_BASE + 0x04)
38#define SKL_ADSP_REG_CL_SD_CBL (HDA_ADSP_LOADER_BASE + 0x08)
39#define SKL_ADSP_REG_CL_SD_LVI (HDA_ADSP_LOADER_BASE + 0x0c)
40#define SKL_ADSP_REG_CL_SD_FIFOW (HDA_ADSP_LOADER_BASE + 0x0e)
41#define SKL_ADSP_REG_CL_SD_FIFOSIZE (HDA_ADSP_LOADER_BASE + 0x10)
42#define SKL_ADSP_REG_CL_SD_FORMAT (HDA_ADSP_LOADER_BASE + 0x12)
43#define SKL_ADSP_REG_CL_SD_FIFOL (HDA_ADSP_LOADER_BASE + 0x14)
44#define SKL_ADSP_REG_CL_SD_BDLPL (HDA_ADSP_LOADER_BASE + 0x18)
45#define SKL_ADSP_REG_CL_SD_BDLPU (HDA_ADSP_LOADER_BASE + 0x1c)
46
47/* CL: Software Position Based FIFO Capability Registers */
48#define SKL_ADSP_REG_CL_SPBFIFO (HDA_ADSP_LOADER_BASE + 0x20)
49#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCH (SKL_ADSP_REG_CL_SPBFIFO + 0x0)
50#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL (SKL_ADSP_REG_CL_SPBFIFO + 0x4)
51#define SKL_ADSP_REG_CL_SPBFIFO_SPIB (SKL_ADSP_REG_CL_SPBFIFO + 0x8)
52#define SKL_ADSP_REG_CL_SPBFIFO_MAXFIFOS (SKL_ADSP_REG_CL_SPBFIFO + 0xc)
53
54/* CL: Stream Descriptor x Control */
55
56/* Stream Reset */
57#define CL_SD_CTL_SRST_SHIFT 0
58#define CL_SD_CTL_SRST_MASK (1 << CL_SD_CTL_SRST_SHIFT)
59#define CL_SD_CTL_SRST(x) \
60 ((x << CL_SD_CTL_SRST_SHIFT) & CL_SD_CTL_SRST_MASK)
61
62/* Stream Run */
63#define CL_SD_CTL_RUN_SHIFT 1
64#define CL_SD_CTL_RUN_MASK (1 << CL_SD_CTL_RUN_SHIFT)
65#define CL_SD_CTL_RUN(x) \
66 ((x << CL_SD_CTL_RUN_SHIFT) & CL_SD_CTL_RUN_MASK)
67
68/* Interrupt On Completion Enable */
69#define CL_SD_CTL_IOCE_SHIFT 2
70#define CL_SD_CTL_IOCE_MASK (1 << CL_SD_CTL_IOCE_SHIFT)
71#define CL_SD_CTL_IOCE(x) \
72 ((x << CL_SD_CTL_IOCE_SHIFT) & CL_SD_CTL_IOCE_MASK)
73
74/* FIFO Error Interrupt Enable */
75#define CL_SD_CTL_FEIE_SHIFT 3
76#define CL_SD_CTL_FEIE_MASK (1 << CL_SD_CTL_FEIE_SHIFT)
77#define CL_SD_CTL_FEIE(x) \
78 ((x << CL_SD_CTL_FEIE_SHIFT) & CL_SD_CTL_FEIE_MASK)
79
80/* Descriptor Error Interrupt Enable */
81#define CL_SD_CTL_DEIE_SHIFT 4
82#define CL_SD_CTL_DEIE_MASK (1 << CL_SD_CTL_DEIE_SHIFT)
83#define CL_SD_CTL_DEIE(x) \
84 ((x << CL_SD_CTL_DEIE_SHIFT) & CL_SD_CTL_DEIE_MASK)
85
86/* FIFO Limit Change */
87#define CL_SD_CTL_FIFOLC_SHIFT 5
88#define CL_SD_CTL_FIFOLC_MASK (1 << CL_SD_CTL_FIFOLC_SHIFT)
89#define CL_SD_CTL_FIFOLC(x) \
90 ((x << CL_SD_CTL_FIFOLC_SHIFT) & CL_SD_CTL_FIFOLC_MASK)
91
92/* Stripe Control */
93#define CL_SD_CTL_STRIPE_SHIFT 16
94#define CL_SD_CTL_STRIPE_MASK (0x3 << CL_SD_CTL_STRIPE_SHIFT)
95#define CL_SD_CTL_STRIPE(x) \
96 ((x << CL_SD_CTL_STRIPE_SHIFT) & CL_SD_CTL_STRIPE_MASK)
97
98/* Traffic Priority */
99#define CL_SD_CTL_TP_SHIFT 18
100#define CL_SD_CTL_TP_MASK (1 << CL_SD_CTL_TP_SHIFT)
101#define CL_SD_CTL_TP(x) \
102 ((x << CL_SD_CTL_TP_SHIFT) & CL_SD_CTL_TP_MASK)
103
104/* Bidirectional Direction Control */
105#define CL_SD_CTL_DIR_SHIFT 19
106#define CL_SD_CTL_DIR_MASK (1 << CL_SD_CTL_DIR_SHIFT)
107#define CL_SD_CTL_DIR(x) \
108 ((x << CL_SD_CTL_DIR_SHIFT) & CL_SD_CTL_DIR_MASK)
109
110/* Stream Number */
111#define CL_SD_CTL_STRM_SHIFT 20
112#define CL_SD_CTL_STRM_MASK (0xf << CL_SD_CTL_STRM_SHIFT)
113#define CL_SD_CTL_STRM(x) \
114 ((x << CL_SD_CTL_STRM_SHIFT) & CL_SD_CTL_STRM_MASK)
115
116/* CL: Stream Descriptor x Status */
117
118/* Buffer Completion Interrupt Status */
119#define CL_SD_STS_BCIS(x) CL_SD_CTL_IOCE(x)
120
121/* FIFO Error */
122#define CL_SD_STS_FIFOE(x) CL_SD_CTL_FEIE(x)
123
124/* Descriptor Error */
125#define CL_SD_STS_DESE(x) CL_SD_CTL_DEIE(x)
126
127/* FIFO Ready */
128#define CL_SD_STS_FIFORDY(x) CL_SD_CTL_FIFOLC(x)
129
130
131/* CL: Stream Descriptor x Last Valid Index */
132#define CL_SD_LVI_SHIFT 0
133#define CL_SD_LVI_MASK (0xff << CL_SD_LVI_SHIFT)
134#define CL_SD_LVI(x) ((x << CL_SD_LVI_SHIFT) & CL_SD_LVI_MASK)
135
136/* CL: Stream Descriptor x FIFO Eviction Watermark */
137#define CL_SD_FIFOW_SHIFT 0
138#define CL_SD_FIFOW_MASK (0x7 << CL_SD_FIFOW_SHIFT)
139#define CL_SD_FIFOW(x) \
140 ((x << CL_SD_FIFOW_SHIFT) & CL_SD_FIFOW_MASK)
141
142/* CL: Stream Descriptor x Buffer Descriptor List Pointer Lower Base Address */
143
144/* Protect Bits */
145#define CL_SD_BDLPLBA_PROT_SHIFT 0
146#define CL_SD_BDLPLBA_PROT_MASK (1 << CL_SD_BDLPLBA_PROT_SHIFT)
147#define CL_SD_BDLPLBA_PROT(x) \
148 ((x << CL_SD_BDLPLBA_PROT_SHIFT) & CL_SD_BDLPLBA_PROT_MASK)
149
150/* Buffer Descriptor List Lower Base Address */
151#define CL_SD_BDLPLBA_SHIFT 7
152#define CL_SD_BDLPLBA_MASK (0x1ffffff << CL_SD_BDLPLBA_SHIFT)
153#define CL_SD_BDLPLBA(x) \
154 ((BDL_ALIGN(lower_32_bits(x)) << CL_SD_BDLPLBA_SHIFT) & CL_SD_BDLPLBA_MASK)
155
156/* Buffer Descriptor List Upper Base Address */
157#define CL_SD_BDLPUBA_SHIFT 0
158#define CL_SD_BDLPUBA_MASK (0xffffffff << CL_SD_BDLPUBA_SHIFT)
159#define CL_SD_BDLPUBA(x) \
160 ((upper_32_bits(x) << CL_SD_BDLPUBA_SHIFT) & CL_SD_BDLPUBA_MASK)
161
162/*
163 * Code Loader - Software Position Based FIFO
164 * Capability Registers x Software Position Based FIFO Header
165 */
166
167/* Next Capability Pointer */
168#define CL_SPBFIFO_SPBFCH_PTR_SHIFT 0
169#define CL_SPBFIFO_SPBFCH_PTR_MASK (0xff << CL_SPBFIFO_SPBFCH_PTR_SHIFT)
170#define CL_SPBFIFO_SPBFCH_PTR(x) \
171 ((x << CL_SPBFIFO_SPBFCH_PTR_SHIFT) & CL_SPBFIFO_SPBFCH_PTR_MASK)
172
173/* Capability Identifier */
174#define CL_SPBFIFO_SPBFCH_ID_SHIFT 16
175#define CL_SPBFIFO_SPBFCH_ID_MASK (0xfff << CL_SPBFIFO_SPBFCH_ID_SHIFT)
176#define CL_SPBFIFO_SPBFCH_ID(x) \
177 ((x << CL_SPBFIFO_SPBFCH_ID_SHIFT) & CL_SPBFIFO_SPBFCH_ID_MASK)
178
179/* Capability Version */
180#define CL_SPBFIFO_SPBFCH_VER_SHIFT 28
181#define CL_SPBFIFO_SPBFCH_VER_MASK (0xf << CL_SPBFIFO_SPBFCH_VER_SHIFT)
182#define CL_SPBFIFO_SPBFCH_VER(x) \
183 ((x << CL_SPBFIFO_SPBFCH_VER_SHIFT) & CL_SPBFIFO_SPBFCH_VER_MASK)
184
185/* Software Position in Buffer Enable */
186#define CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT 0
187#define CL_SPBFIFO_SPBFCCTL_SPIBE_MASK (1 << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
188#define CL_SPBFIFO_SPBFCCTL_SPIBE(x) \
189 ((x << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
190
191/* SST IPC SKL defines */
192#define SKL_WAIT_TIMEOUT 500 /* 500 msec */
193#define SKL_MAX_BUFFER_SIZE (32 * PAGE_SIZE)
194
195enum skl_cl_dma_wake_states {
196 SKL_CL_DMA_STATUS_NONE = 0,
197 SKL_CL_DMA_BUF_COMPLETE,
198 SKL_CL_DMA_ERR, /* TODO: Expand the error states */
199};
200
201struct sst_dsp;
202
203struct skl_cl_dev_ops {
204 void (*cl_setup_bdle)(struct sst_dsp *ctx,
205 struct snd_dma_buffer *dmab_data,
206 u32 **bdlp, int size, int with_ioc);
207 void (*cl_setup_controller)(struct sst_dsp *ctx,
208 struct snd_dma_buffer *dmab_bdl,
209 unsigned int max_size, u32 page_count);
210 void (*cl_setup_spb)(struct sst_dsp *ctx,
211 unsigned int size, bool enable);
212 void (*cl_cleanup_spb)(struct sst_dsp *ctx);
213 void (*cl_trigger)(struct sst_dsp *ctx, bool enable);
214 void (*cl_cleanup_controller)(struct sst_dsp *ctx);
215 int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
216 const void *bin, u32 size);
217 void (*cl_stop_dma)(struct sst_dsp *ctx);
218};
219
220/**
221 * skl_cl_dev - holds information for code loader dma transfer
222 *
223 * @dmab_data: buffer pointer
224 * @dmab_bdl: buffer descriptor list
225 * @bufsize: ring buffer size
226 * @frags: Last valid buffer descriptor index in the BDL
227 * @curr_spib_pos: Current position in ring buffer
228 * @dma_buffer_offset: dma buffer offset
229 * @ops: operations supported on CL dma
230 * @wait_queue: wait queue woken from the CL DMA interrupt handler
231 * @wake_status: DMA wake status
232 * @wait_condition: condition to wait on wait queue
234 */
235struct skl_cl_dev {
236 struct snd_dma_buffer dmab_data;
237 struct snd_dma_buffer dmab_bdl;
238
239 unsigned int bufsize;
240 unsigned int frags;
241
242 unsigned int curr_spib_pos;
243 unsigned int dma_buffer_offset;
244 struct skl_cl_dev_ops ops;
245
246 wait_queue_head_t wait_queue;
247 int wake_status;
248 bool wait_condition;
249};
250
251#endif /* SKL_SST_CLDMA_H_ */
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
new file mode 100644
index 000000000000..94875b008b0b
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -0,0 +1,342 @@
1/*
2 * skl-sst-dsp.c - SKL SST library generic function
3 *
4 * Copyright (C) 2014-15, Intel Corporation.
5 * Author:Rafal Redzimski <rafal.f.redzimski@intel.com>
6 * Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18#include <sound/pcm.h>
19
20#include "../common/sst-dsp.h"
21#include "../common/sst-ipc.h"
22#include "../common/sst-dsp-priv.h"
23#include "skl-sst-ipc.h"
24
25/* various timeout values */
26#define SKL_DSP_PU_TO 50
27#define SKL_DSP_PD_TO 50
28#define SKL_DSP_RESET_TO 50
29
30void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
31{
32 mutex_lock(&ctx->mutex);
33 ctx->sst_state = state;
34 mutex_unlock(&ctx->mutex);
35}
36
37static int skl_dsp_core_set_reset_state(struct sst_dsp *ctx)
38{
39 int ret;
40
41 /* update bits */
42 sst_dsp_shim_update_bits_unlocked(ctx,
43 SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
44 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
45
46 /* poll with timeout to check if operation successful */
47 ret = sst_dsp_register_poll(ctx,
48 SKL_ADSP_REG_ADSPCS,
49 SKL_ADSPCS_CRST_MASK,
50 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
51 SKL_DSP_RESET_TO,
52 "Set reset");
53 if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
54 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
55 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
56 dev_err(ctx->dev, "Set reset state failed\n");
57 ret = -EIO;
58 }
59
60 return ret;
61}
62
63static int skl_dsp_core_unset_reset_state(struct sst_dsp *ctx)
64{
65 int ret;
66
67 dev_dbg(ctx->dev, "In %s\n", __func__);
68
69 /* update bits */
70 sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
71 SKL_ADSPCS_CRST_MASK, 0);
72
73 /* poll with timeout to check if operation successful */
74 ret = sst_dsp_register_poll(ctx,
75 SKL_ADSP_REG_ADSPCS,
76 SKL_ADSPCS_CRST_MASK,
77 0,
78 SKL_DSP_RESET_TO,
79 "Unset reset");
80
81 if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
82 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
83 dev_err(ctx->dev, "Unset reset state failed\n");
84 ret = -EIO;
85 }
86
87 return ret;
88}
89
90static bool is_skl_dsp_core_enable(struct sst_dsp *ctx)
91{
92 int val;
93 bool is_enable;
94
95 val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
96
97 is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
98 (val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
99 !(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
100 !(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
101
102 dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
103 return is_enable;
104}
105
106static int skl_dsp_reset_core(struct sst_dsp *ctx)
107{
108 /* stall core */
109 sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
110		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) |
111 SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
112
113 /* set reset state */
114 return skl_dsp_core_set_reset_state(ctx);
115}
116
117static int skl_dsp_start_core(struct sst_dsp *ctx)
118{
119 int ret;
120
121 /* unset reset state */
122 ret = skl_dsp_core_unset_reset_state(ctx);
123 if (ret < 0) {
124		dev_dbg(ctx->dev, "dsp unset reset failed\n");
125 return ret;
126 }
127
128 /* run core */
129 dev_dbg(ctx->dev, "run core...\n");
130 sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
131 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
132 ~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
133
134 if (!is_skl_dsp_core_enable(ctx)) {
135 skl_dsp_reset_core(ctx);
136 dev_err(ctx->dev, "DSP core enable failed\n");
137 ret = -EIO;
138 }
139
140 return ret;
141}
142
143static int skl_dsp_core_power_up(struct sst_dsp *ctx)
144{
145 int ret;
146
147 /* update bits */
148 sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
149 SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
150
151 /* poll with timeout to check if operation successful */
152 ret = sst_dsp_register_poll(ctx,
153 SKL_ADSP_REG_ADSPCS,
154 SKL_ADSPCS_CPA_MASK,
155 SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
156 SKL_DSP_PU_TO,
157 "Power up");
158
159 if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
160 SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
161 SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
162 dev_err(ctx->dev, "DSP core power up failed\n");
163 ret = -EIO;
164 }
165
166 return ret;
167}
168
169static int skl_dsp_core_power_down(struct sst_dsp *ctx)
170{
171 /* update bits */
172 sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
173 SKL_ADSPCS_SPA_MASK, 0);
174
175 /* poll with timeout to check if operation successful */
176 return sst_dsp_register_poll(ctx,
177 SKL_ADSP_REG_ADSPCS,
178 SKL_ADSPCS_SPA_MASK,
179 0,
180 SKL_DSP_PD_TO,
181 "Power down");
182}
183
184static int skl_dsp_enable_core(struct sst_dsp *ctx)
185{
186 int ret;
187
188 /* power up */
189 ret = skl_dsp_core_power_up(ctx);
190 if (ret < 0) {
191 dev_dbg(ctx->dev, "dsp core power up failed\n");
192 return ret;
193 }
194
195 return skl_dsp_start_core(ctx);
196}
197
198int skl_dsp_disable_core(struct sst_dsp *ctx)
199{
200 int ret;
201
202 ret = skl_dsp_reset_core(ctx);
203 if (ret < 0) {
204 dev_err(ctx->dev, "dsp core reset failed\n");
205 return ret;
206 }
207
208 /* power down core*/
209 ret = skl_dsp_core_power_down(ctx);
210 if (ret < 0) {
211 dev_err(ctx->dev, "dsp core power down failed\n");
212 return ret;
213 }
214
215 if (is_skl_dsp_core_enable(ctx)) {
216 dev_err(ctx->dev, "DSP core disable failed\n");
217 ret = -EIO;
218 }
219
220 return ret;
221}
222
223int skl_dsp_boot(struct sst_dsp *ctx)
224{
225 int ret;
226
227 if (is_skl_dsp_core_enable(ctx)) {
228		dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dsp core\n");
229 ret = skl_dsp_reset_core(ctx);
230 if (ret < 0) {
231 dev_err(ctx->dev, "dsp reset failed\n");
232 return ret;
233 }
234
235 ret = skl_dsp_start_core(ctx);
236 if (ret < 0) {
237 dev_err(ctx->dev, "dsp start failed\n");
238 return ret;
239 }
240 } else {
241		dev_dbg(ctx->dev, "disable and enable to make sure DSP is in a valid state\n");
242 ret = skl_dsp_disable_core(ctx);
243
244 if (ret < 0) {
245			dev_err(ctx->dev, "dsp disable core failed\n");
246 return ret;
247 }
248 ret = skl_dsp_enable_core(ctx);
249 }
250
251 return ret;
252}
253
254irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
255{
256 struct sst_dsp *ctx = dev_id;
257 u32 val;
258 irqreturn_t result = IRQ_NONE;
259
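	/*
	 * Top half of the shared DSP interrupt: latch the interrupt status
	 * and mask the source that fired (IPC or CL DMA); the actual
	 * processing is deferred to the threaded handler registered via
	 * request_threaded_irq() in skl_dsp_ctx_init().
	 */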
260 spin_lock(&ctx->spinlock);
261
262 val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
263 ctx->intr_status = val;
264
265 if (val & SKL_ADSPIS_IPC) {
266 skl_ipc_int_disable(ctx);
267 result = IRQ_WAKE_THREAD;
268 }
269
270 if (val & SKL_ADSPIS_CL_DMA) {
271 skl_cldma_int_disable(ctx);
272 result = IRQ_WAKE_THREAD;
273 }
274
275 spin_unlock(&ctx->spinlock);
276
277 return result;
278}
279
280int skl_dsp_wake(struct sst_dsp *ctx)
281{
282 return ctx->fw_ops.set_state_D0(ctx);
283}
284EXPORT_SYMBOL_GPL(skl_dsp_wake);
285
286int skl_dsp_sleep(struct sst_dsp *ctx)
287{
288 return ctx->fw_ops.set_state_D3(ctx);
289}
290EXPORT_SYMBOL_GPL(skl_dsp_sleep);
291
292struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
293 struct sst_dsp_device *sst_dev, int irq)
294{
295 int ret;
296 struct sst_dsp *sst;
297
298 sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
299 if (sst == NULL)
300 return NULL;
301
302 spin_lock_init(&sst->spinlock);
303 mutex_init(&sst->mutex);
304 sst->dev = dev;
305 sst->sst_dev = sst_dev;
306 sst->irq = irq;
307 sst->ops = sst_dev->ops;
308 sst->thread_context = sst_dev->thread_context;
309
310 /* Initialise SST Audio DSP */
311 if (sst->ops->init) {
312 ret = sst->ops->init(sst, NULL);
313 if (ret < 0)
314 return NULL;
315 }
316
317 /* Register the ISR */
318 ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
319 sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
320 if (ret) {
321 dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
322 sst->irq);
323 return NULL;
324 }
325
326 return sst;
327}
328
329void skl_dsp_free(struct sst_dsp *dsp)
330{
331 skl_ipc_int_disable(dsp);
332
333 free_irq(dsp->irq, dsp);
334 skl_dsp_disable_core(dsp);
335}
336EXPORT_SYMBOL_GPL(skl_dsp_free);
337
338bool is_skl_dsp_running(struct sst_dsp *ctx)
339{
340 return (ctx->sst_state == SKL_DSP_RUNNING);
341}
342EXPORT_SYMBOL_GPL(is_skl_dsp_running);
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
new file mode 100644
index 000000000000..6bfcef449bdc
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -0,0 +1,145 @@
1/*
2 * Skylake SST DSP Support
3 *
4 * Copyright (C) 2014-15, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#ifndef __SKL_SST_DSP_H__
17#define __SKL_SST_DSP_H__
18
19#include <linux/interrupt.h>
20#include <sound/memalloc.h>
21#include "skl-sst-cldma.h"
22
23struct sst_dsp;
24struct skl_sst;
25struct sst_dsp_device;
26
27/* Intel HD Audio General DSP Registers */
28#define SKL_ADSP_GEN_BASE 0x0
29#define SKL_ADSP_REG_ADSPCS (SKL_ADSP_GEN_BASE + 0x04)
30#define SKL_ADSP_REG_ADSPIC (SKL_ADSP_GEN_BASE + 0x08)
31#define SKL_ADSP_REG_ADSPIS (SKL_ADSP_GEN_BASE + 0x0C)
32#define SKL_ADSP_REG_ADSPIC2 (SKL_ADSP_GEN_BASE + 0x10)
33#define SKL_ADSP_REG_ADSPIS2 (SKL_ADSP_GEN_BASE + 0x14)
34
35/* Intel HD Audio Inter-Processor Communication Registers */
36#define SKL_ADSP_IPC_BASE 0x40
37#define SKL_ADSP_REG_HIPCT (SKL_ADSP_IPC_BASE + 0x00)
38#define SKL_ADSP_REG_HIPCTE (SKL_ADSP_IPC_BASE + 0x04)
39#define SKL_ADSP_REG_HIPCI (SKL_ADSP_IPC_BASE + 0x08)
40#define SKL_ADSP_REG_HIPCIE (SKL_ADSP_IPC_BASE + 0x0C)
41#define SKL_ADSP_REG_HIPCCTL (SKL_ADSP_IPC_BASE + 0x10)
42
43/* HIPCI */
44#define SKL_ADSP_REG_HIPCI_BUSY BIT(31)
45
46/* HIPCIE */
47#define SKL_ADSP_REG_HIPCIE_DONE BIT(30)
48
49/* HIPCCTL */
50#define SKL_ADSP_REG_HIPCCTL_DONE BIT(1)
51#define SKL_ADSP_REG_HIPCCTL_BUSY BIT(0)
52
53/* HIPCT */
54#define SKL_ADSP_REG_HIPCT_BUSY BIT(31)
55
56/* Intel HD Audio SRAM Window 1 */
57#define SKL_ADSP_SRAM1_BASE 0xA000
58
59#define SKL_ADSP_MMIO_LEN 0x10000
60
61#define SKL_ADSP_W0_STAT_SZ 0x800
62
63#define SKL_ADSP_W0_UP_SZ 0x800
64
65#define SKL_ADSP_W1_SZ 0x1000
66
67#define SKL_FW_STS_MASK 0xf
68
69#define SKL_FW_INIT 0x1
70#define SKL_FW_RFW_START 0xf
71
72#define SKL_ADSPIC_IPC 1
73#define SKL_ADSPIS_IPC 1
74
75/* ADSPCS - Audio DSP Control & Status */
76#define SKL_DSP_CORES 1
77#define SKL_DSP_CORE0_MASK 1
78#define SKL_DSP_CORES_MASK ((1 << SKL_DSP_CORES) - 1)
79
80/* Core Reset - asserted high */
81#define SKL_ADSPCS_CRST_SHIFT 0
82#define SKL_ADSPCS_CRST_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
83#define SKL_ADSPCS_CRST(x) ((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
84
85/* Core run/stall - when set to '1' core is stalled */
86#define SKL_ADSPCS_CSTALL_SHIFT 8
87#define SKL_ADSPCS_CSTALL_MASK (SKL_DSP_CORES_MASK << \
88 SKL_ADSPCS_CSTALL_SHIFT)
89#define SKL_ADSPCS_CSTALL(x) ((x << SKL_ADSPCS_CSTALL_SHIFT) & \
90 SKL_ADSPCS_CSTALL_MASK)
91
92/* Set Power Active - when set to '1' turn cores on */
93#define SKL_ADSPCS_SPA_SHIFT 16
94#define SKL_ADSPCS_SPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
95#define SKL_ADSPCS_SPA(x) ((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
96
97/* Current Power Active - power status of cores, set by hardware */
98#define SKL_ADSPCS_CPA_SHIFT 24
99#define SKL_ADSPCS_CPA_MASK (SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
100#define SKL_ADSPCS_CPA(x) ((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
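/*
 * Bring-up order implied by these fields (see skl_dsp_enable_core() and
 * skl_dsp_start_core() in skl-sst-dsp.c): set SPA and wait for CPA, clear
 * CRST to leave reset, then clear CSTALL to let the core run; tear-down
 * reverses this order.
 */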
101
102#define SST_DSP_POWER_D0 0x0 /* full On */
103#define SST_DSP_POWER_D3 0x3 /* Off */
104
105enum skl_dsp_states {
106 SKL_DSP_RUNNING = 1,
107 SKL_DSP_RESET,
108};
109
110struct skl_dsp_fw_ops {
111 int (*load_fw)(struct sst_dsp *ctx);
112 /* FW module parser/loader */
113 int (*parse_fw)(struct sst_dsp *ctx);
114 int (*set_state_D0)(struct sst_dsp *ctx);
115 int (*set_state_D3)(struct sst_dsp *ctx);
116 unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
117};
118
119struct skl_dsp_loader_ops {
120 int (*alloc_dma_buf)(struct device *dev,
121 struct snd_dma_buffer *dmab, size_t size);
122 int (*free_dma_buf)(struct device *dev,
123 struct snd_dma_buffer *dmab);
124};
125
126void skl_cldma_process_intr(struct sst_dsp *ctx);
127void skl_cldma_int_disable(struct sst_dsp *ctx);
128int skl_cldma_prepare(struct sst_dsp *ctx);
129
130void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
131struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
132 struct sst_dsp_device *sst_dev, int irq);
133int skl_dsp_disable_core(struct sst_dsp *ctx);
134bool is_skl_dsp_running(struct sst_dsp *ctx);
135irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
136int skl_dsp_wake(struct sst_dsp *ctx);
137int skl_dsp_sleep(struct sst_dsp *ctx);
138void skl_dsp_free(struct sst_dsp *dsp);
139
140int skl_dsp_boot(struct sst_dsp *ctx);
141int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
142 struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp);
143void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
144
145#endif /*__SKL_SST_DSP_H__*/
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
new file mode 100644
index 000000000000..937a0a3a63a0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -0,0 +1,771 @@
1/*
2 * skl-sst-ipc.c - Intel skl IPC Support
3 *
4 * Copyright (C) 2014-15, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15#include <linux/device.h>
16
17#include "../common/sst-dsp.h"
18#include "../common/sst-dsp-priv.h"
19#include "skl-sst-dsp.h"
20#include "skl-sst-ipc.h"
21
22
23#define IPC_IXC_STATUS_BITS 24
24
25/* Global Message - Generic */
26#define IPC_GLB_TYPE_SHIFT 24
27#define IPC_GLB_TYPE_MASK (0xf << IPC_GLB_TYPE_SHIFT)
28#define IPC_GLB_TYPE(x) ((x) << IPC_GLB_TYPE_SHIFT)
29
30/* Global Message - Reply */
31#define IPC_GLB_REPLY_STATUS_SHIFT 24
32#define IPC_GLB_REPLY_STATUS_MASK ((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1)
33#define IPC_GLB_REPLY_STATUS(x) ((x) << IPC_GLB_REPLY_STATUS_SHIFT)
34
35#define IPC_TIMEOUT_MSECS 3000
36
37#define IPC_EMPTY_LIST_SIZE 8
38
39#define IPC_MSG_TARGET_SHIFT 30
40#define IPC_MSG_TARGET_MASK 0x1
41#define IPC_MSG_TARGET(x) (((x) & IPC_MSG_TARGET_MASK) \
42 << IPC_MSG_TARGET_SHIFT)
43
44#define IPC_MSG_DIR_SHIFT 29
45#define IPC_MSG_DIR_MASK 0x1
46#define IPC_MSG_DIR(x) (((x) & IPC_MSG_DIR_MASK) \
47 << IPC_MSG_DIR_SHIFT)
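/*
 * Per the masks in this file, the primary IPC word is laid out as:
 * bits 0..23 payload (reply status for replies), bits 24..28 message
 * type, bit 29 direction (request/reply), bit 30 target (FW generic vs.
 * module); bit 31 maps to the BUSY/DONE flag of the HIPCI/HIPCT
 * doorbell registers.
 */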
48/* Global Notification Message */
49#define IPC_GLB_NOTIFY_TYPE_SHIFT 16
50#define IPC_GLB_NOTIFY_TYPE_MASK 0xFF
51#define IPC_GLB_NOTIFY_TYPE(x) (((x) >> IPC_GLB_NOTIFY_TYPE_SHIFT) \
52 & IPC_GLB_NOTIFY_TYPE_MASK)
53
54#define IPC_GLB_NOTIFY_MSG_TYPE_SHIFT 24
55#define IPC_GLB_NOTIFY_MSG_TYPE_MASK 0x1F
56#define IPC_GLB_NOTIFY_MSG_TYPE(x) (((x) >> IPC_GLB_NOTIFY_MSG_TYPE_SHIFT) \
57 & IPC_GLB_NOTIFY_MSG_TYPE_MASK)
58
59#define IPC_GLB_NOTIFY_RSP_SHIFT 29
60#define IPC_GLB_NOTIFY_RSP_MASK 0x1
61#define IPC_GLB_NOTIFY_RSP_TYPE(x) (((x) >> IPC_GLB_NOTIFY_RSP_SHIFT) \
62 & IPC_GLB_NOTIFY_RSP_MASK)
63
64/* Pipeline operations */
65
66/* Create pipeline message */
67#define IPC_PPL_MEM_SIZE_SHIFT 0
68#define IPC_PPL_MEM_SIZE_MASK 0x7FF
69#define IPC_PPL_MEM_SIZE(x) (((x) & IPC_PPL_MEM_SIZE_MASK) \
70 << IPC_PPL_MEM_SIZE_SHIFT)
71
72#define IPC_PPL_TYPE_SHIFT 11
73#define IPC_PPL_TYPE_MASK 0x1F
74#define IPC_PPL_TYPE(x) (((x) & IPC_PPL_TYPE_MASK) \
75 << IPC_PPL_TYPE_SHIFT)
76
77#define IPC_INSTANCE_ID_SHIFT 16
78#define IPC_INSTANCE_ID_MASK 0xFF
79#define IPC_INSTANCE_ID(x) (((x) & IPC_INSTANCE_ID_MASK) \
80 << IPC_INSTANCE_ID_SHIFT)
81
82/* Set pipeline state message */
83#define IPC_PPL_STATE_SHIFT 0
84#define IPC_PPL_STATE_MASK 0x1F
85#define IPC_PPL_STATE(x) (((x) & IPC_PPL_STATE_MASK) \
86 << IPC_PPL_STATE_SHIFT)
87
88/* Module operations primary register */
89#define IPC_MOD_ID_SHIFT 0
90#define IPC_MOD_ID_MASK 0xFFFF
91#define IPC_MOD_ID(x) (((x) & IPC_MOD_ID_MASK) \
92 << IPC_MOD_ID_SHIFT)
93
94#define IPC_MOD_INSTANCE_ID_SHIFT 16
95#define IPC_MOD_INSTANCE_ID_MASK 0xFF
96#define IPC_MOD_INSTANCE_ID(x) (((x) & IPC_MOD_INSTANCE_ID_MASK) \
97 << IPC_MOD_INSTANCE_ID_SHIFT)
98
99/* Init instance message extension register */
100#define IPC_PARAM_BLOCK_SIZE_SHIFT 0
101#define IPC_PARAM_BLOCK_SIZE_MASK 0xFFFF
102#define IPC_PARAM_BLOCK_SIZE(x) (((x) & IPC_PARAM_BLOCK_SIZE_MASK) \
103 << IPC_PARAM_BLOCK_SIZE_SHIFT)
104
105#define IPC_PPL_INSTANCE_ID_SHIFT 16
106#define IPC_PPL_INSTANCE_ID_MASK 0xFF
107#define IPC_PPL_INSTANCE_ID(x) (((x) & IPC_PPL_INSTANCE_ID_MASK) \
108 << IPC_PPL_INSTANCE_ID_SHIFT)
109
110#define IPC_CORE_ID_SHIFT 24
111#define IPC_CORE_ID_MASK 0x1F
112#define IPC_CORE_ID(x) (((x) & IPC_CORE_ID_MASK) \
113 << IPC_CORE_ID_SHIFT)
114
115/* Bind/Unbind message extension register */
116#define IPC_DST_MOD_ID_SHIFT 0
117#define IPC_DST_MOD_ID(x) (((x) & IPC_MOD_ID_MASK) \
118 << IPC_DST_MOD_ID_SHIFT)
119
120#define IPC_DST_MOD_INSTANCE_ID_SHIFT 16
121#define IPC_DST_MOD_INSTANCE_ID(x) (((x) & IPC_MOD_INSTANCE_ID_MASK) \
122 << IPC_DST_MOD_INSTANCE_ID_SHIFT)
123
124#define IPC_DST_QUEUE_SHIFT 24
125#define IPC_DST_QUEUE_MASK 0x7
126#define IPC_DST_QUEUE(x) (((x) & IPC_DST_QUEUE_MASK) \
127 << IPC_DST_QUEUE_SHIFT)
128
129#define IPC_SRC_QUEUE_SHIFT 27
130#define IPC_SRC_QUEUE_MASK 0x7
131#define IPC_SRC_QUEUE(x) (((x) & IPC_SRC_QUEUE_MASK) \
132 << IPC_SRC_QUEUE_SHIFT)
133
134/* Save pipeline message extension register */
135#define IPC_DMA_ID_SHIFT 0
136#define IPC_DMA_ID_MASK 0x1F
137#define IPC_DMA_ID(x) (((x) & IPC_DMA_ID_MASK) \
138 << IPC_DMA_ID_SHIFT)
139/* Large Config message extension register */
140#define IPC_DATA_OFFSET_SZ_SHIFT 0
141#define IPC_DATA_OFFSET_SZ_MASK 0xFFFFF
142#define IPC_DATA_OFFSET_SZ(x) (((x) & IPC_DATA_OFFSET_SZ_MASK) \
143 << IPC_DATA_OFFSET_SZ_SHIFT)
144#define IPC_DATA_OFFSET_SZ_CLEAR ~(IPC_DATA_OFFSET_SZ_MASK \
145 << IPC_DATA_OFFSET_SZ_SHIFT)
146
147#define IPC_LARGE_PARAM_ID_SHIFT 20
148#define IPC_LARGE_PARAM_ID_MASK 0xFF
149#define IPC_LARGE_PARAM_ID(x) (((x) & IPC_LARGE_PARAM_ID_MASK) \
150 << IPC_LARGE_PARAM_ID_SHIFT)
151
152#define IPC_FINAL_BLOCK_SHIFT 28
153#define IPC_FINAL_BLOCK_MASK 0x1
154#define IPC_FINAL_BLOCK(x) (((x) & IPC_FINAL_BLOCK_MASK) \
155 << IPC_FINAL_BLOCK_SHIFT)
156
157#define IPC_INITIAL_BLOCK_SHIFT 29
158#define IPC_INITIAL_BLOCK_MASK 0x1
159#define IPC_INITIAL_BLOCK(x) (((x) & IPC_INITIAL_BLOCK_MASK) \
160 << IPC_INITIAL_BLOCK_SHIFT)
161#define IPC_INITIAL_BLOCK_CLEAR ~(IPC_INITIAL_BLOCK_MASK \
162 << IPC_INITIAL_BLOCK_SHIFT)
163
164enum skl_ipc_msg_target {
165 IPC_FW_GEN_MSG = 0,
166 IPC_MOD_MSG = 1
167};
168
169enum skl_ipc_msg_direction {
170 IPC_MSG_REQUEST = 0,
171 IPC_MSG_REPLY = 1
172};
173
174/* Global Message Types */
175enum skl_ipc_glb_type {
176 IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
177 IPC_GLB_LOAD_MULTIPLE_MODS = 15,
178 IPC_GLB_UNLOAD_MULTIPLE_MODS = 16,
179 IPC_GLB_CREATE_PPL = 17,
180 IPC_GLB_DELETE_PPL = 18,
181 IPC_GLB_SET_PPL_STATE = 19,
182 IPC_GLB_GET_PPL_STATE = 20,
183 IPC_GLB_GET_PPL_CONTEXT_SIZE = 21,
184 IPC_GLB_SAVE_PPL = 22,
185 IPC_GLB_RESTORE_PPL = 23,
186 IPC_GLB_NOTIFY = 26,
187 IPC_GLB_MAX_IPC_MSG_NUMBER = 31 /* Maximum message number */
188};
189
190enum skl_ipc_glb_reply {
191 IPC_GLB_REPLY_SUCCESS = 0,
192
193 IPC_GLB_REPLY_UNKNOWN_MSG_TYPE = 1,
194 IPC_GLB_REPLY_ERROR_INVALID_PARAM = 2,
195
196 IPC_GLB_REPLY_BUSY = 3,
197 IPC_GLB_REPLY_PENDING = 4,
198 IPC_GLB_REPLY_FAILURE = 5,
199 IPC_GLB_REPLY_INVALID_REQUEST = 6,
200
201 IPC_GLB_REPLY_OUT_OF_MEMORY = 7,
202 IPC_GLB_REPLY_OUT_OF_MIPS = 8,
203
204 IPC_GLB_REPLY_INVALID_RESOURCE_ID = 9,
205 IPC_GLB_REPLY_INVALID_RESOURCE_STATE = 10,
206
207 IPC_GLB_REPLY_MOD_MGMT_ERROR = 100,
208 IPC_GLB_REPLY_MOD_LOAD_CL_FAILED = 101,
209 IPC_GLB_REPLY_MOD_LOAD_INVALID_HASH = 102,
210
211 IPC_GLB_REPLY_MOD_UNLOAD_INST_EXIST = 103,
212 IPC_GLB_REPLY_MOD_NOT_INITIALIZED = 104,
213
214 IPC_GLB_REPLY_INVALID_CONFIG_PARAM_ID = 120,
215 IPC_GLB_REPLY_INVALID_CONFIG_DATA_LEN = 121,
216 IPC_GLB_REPLY_GATEWAY_NOT_INITIALIZED = 140,
217 IPC_GLB_REPLY_GATEWAY_NOT_EXIST = 141,
218
219 IPC_GLB_REPLY_PPL_NOT_INITIALIZED = 160,
220 IPC_GLB_REPLY_PPL_NOT_EXIST = 161,
221 IPC_GLB_REPLY_PPL_SAVE_FAILED = 162,
222 IPC_GLB_REPLY_PPL_RESTORE_FAILED = 163,
223
224 IPC_MAX_STATUS = ((1<<IPC_IXC_STATUS_BITS)-1)
225};
226
227enum skl_ipc_notification_type {
228 IPC_GLB_NOTIFY_GLITCH = 0,
229 IPC_GLB_NOTIFY_OVERRUN = 1,
230 IPC_GLB_NOTIFY_UNDERRUN = 2,
231 IPC_GLB_NOTIFY_END_STREAM = 3,
232 IPC_GLB_NOTIFY_PHRASE_DETECTED = 4,
233 IPC_GLB_NOTIFY_RESOURCE_EVENT = 5,
234 IPC_GLB_NOTIFY_LOG_BUFFER_STATUS = 6,
235 IPC_GLB_NOTIFY_TIMESTAMP_CAPTURED = 7,
236 IPC_GLB_NOTIFY_FW_READY = 8
237};
238
239/* Module Message Types */
240enum skl_ipc_module_msg {
241 IPC_MOD_INIT_INSTANCE = 0,
242 IPC_MOD_CONFIG_GET = 1,
243 IPC_MOD_CONFIG_SET = 2,
244 IPC_MOD_LARGE_CONFIG_GET = 3,
245 IPC_MOD_LARGE_CONFIG_SET = 4,
246 IPC_MOD_BIND = 5,
247 IPC_MOD_UNBIND = 6,
248 IPC_MOD_SET_DX = 7
249};
250
251static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
252 size_t tx_size)
253{
254 if (tx_size)
255 memcpy(msg->tx_data, tx_data, tx_size);
256}
257
258static bool skl_ipc_is_dsp_busy(struct sst_dsp *dsp)
259{
260 u32 hipci;
261
262 hipci = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCI);
263 return (hipci & SKL_ADSP_REG_HIPCI_BUSY);
264}
265
266/* Lock to be held by caller */
267static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
268{
269 struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->header);
270
271 if (msg->tx_size)
272 sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
273 sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCIE,
274 header->extension);
275 sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCI,
276 header->primary | SKL_ADSP_REG_HIPCI_BUSY);
277}
278
279static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
280 u64 ipc_header)
281{
282 struct ipc_message *msg = NULL;
283 struct skl_ipc_header *header = (struct skl_ipc_header *)(&ipc_header);
284
285 if (list_empty(&ipc->rx_list)) {
286 dev_err(ipc->dev, "ipc: rx list is empty but received 0x%x\n",
287 header->primary);
288 goto out;
289 }
290
291 msg = list_first_entry(&ipc->rx_list, struct ipc_message, list);
292
293out:
294 return msg;
295
296}
297
298static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
299 struct skl_ipc_header header)
300{
301 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
302
303 if (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
304 switch (IPC_GLB_NOTIFY_TYPE(header.primary)) {
305
306 case IPC_GLB_NOTIFY_UNDERRUN:
307 dev_err(ipc->dev, "FW Underrun %x\n", header.primary);
308 break;
309
310 case IPC_GLB_NOTIFY_RESOURCE_EVENT:
311 dev_err(ipc->dev, "MCPS Budget Violation: %x\n",
312 header.primary);
313 break;
314
315 case IPC_GLB_NOTIFY_FW_READY:
316 skl->boot_complete = true;
317 wake_up(&skl->boot_wait);
318 break;
319
320 default:
321			dev_err(ipc->dev, "ipc: Unhandled error msg=%x\n",
322 header.primary);
323 break;
324 }
325 }
326
327 return 0;
328}
329
330static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
331 struct skl_ipc_header header)
332{
333 struct ipc_message *msg;
334 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
335 u64 *ipc_header = (u64 *)(&header);
336
337 msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
338 if (msg == NULL) {
339 dev_dbg(ipc->dev, "ipc: rx list is empty\n");
340 return;
341 }
342
343 /* first process the header */
344 switch (reply) {
345 case IPC_GLB_REPLY_SUCCESS:
346 dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
347 break;
348
349 case IPC_GLB_REPLY_OUT_OF_MEMORY:
350 dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary);
351 msg->errno = -ENOMEM;
352 break;
353
354 case IPC_GLB_REPLY_BUSY:
355 dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary);
356 msg->errno = -EBUSY;
357 break;
358
359 default:
360		dev_err(ipc->dev, "Unknown ipc reply: 0x%x\n", reply);
361 msg->errno = -EINVAL;
362 break;
363 }
364
365 if (reply != IPC_GLB_REPLY_SUCCESS) {
366		dev_err(ipc->dev, "ipc FW reply: reply=%d\n", reply);
367 dev_err(ipc->dev, "FW Error Code: %u\n",
368 ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
369 }
370
371 list_del(&msg->list);
372 sst_ipc_tx_msg_reply_complete(ipc, msg);
373}
374
375irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
376{
377 struct sst_dsp *dsp = context;
378 struct skl_sst *skl = sst_dsp_get_thread_context(dsp);
379 struct sst_generic_ipc *ipc = &skl->ipc;
380 struct skl_ipc_header header = {0};
381 u32 hipcie, hipct, hipcte;
382 int ipc_irq = 0;
383
384 if (dsp->intr_status & SKL_ADSPIS_CL_DMA)
385 skl_cldma_process_intr(dsp);
386
387 /* Here we handle IPC interrupts only */
388 if (!(dsp->intr_status & SKL_ADSPIS_IPC))
389 return IRQ_NONE;
390
391 hipcie = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCIE);
392 hipct = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCT);
393
394 /* reply message from DSP */
395 if (hipcie & SKL_ADSP_REG_HIPCIE_DONE) {
396 sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
397 SKL_ADSP_REG_HIPCCTL_DONE, 0);
398
399 /* clear DONE bit - tell DSP we have completed the operation */
400 sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCIE,
401 SKL_ADSP_REG_HIPCIE_DONE, SKL_ADSP_REG_HIPCIE_DONE);
402
403 ipc_irq = 1;
404
405 /* unmask Done interrupt */
406 sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
407 SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
408 }
409
410 /* New message from DSP */
411 if (hipct & SKL_ADSP_REG_HIPCT_BUSY) {
412 hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
413 header.primary = hipct;
414 header.extension = hipcte;
415		dev_dbg(dsp->dev, "IPC irq: Firmware response primary:%x\n",
416			header.primary);
417		dev_dbg(dsp->dev, "IPC irq: Firmware response extension:%x\n",
418			header.extension);
419
420 if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
421 /* Handle Immediate reply from DSP Core */
422 skl_ipc_process_reply(ipc, header);
423 } else {
424 dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
425 skl_ipc_process_notification(ipc, header);
426 }
427 /* clear busy interrupt */
428 sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCT,
429 SKL_ADSP_REG_HIPCT_BUSY, SKL_ADSP_REG_HIPCT_BUSY);
430 ipc_irq = 1;
431 }
432
433 if (ipc_irq == 0)
434 return IRQ_NONE;
435
436 skl_ipc_int_enable(dsp);
437
438 /* continue to send any remaining messages... */
439 queue_kthread_work(&ipc->kworker, &ipc->kwork);
440
441 return IRQ_HANDLED;
442}
443
444void skl_ipc_int_enable(struct sst_dsp *ctx)
445{
446 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_ADSPIC,
447 SKL_ADSPIC_IPC, SKL_ADSPIC_IPC);
448}
449
450void skl_ipc_int_disable(struct sst_dsp *ctx)
451{
452 sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
453 SKL_ADSPIC_IPC, 0);
454}
455
456void skl_ipc_op_int_enable(struct sst_dsp *ctx)
457{
458 /* enable IPC DONE interrupt */
459 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
460 SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
461
462 /* Enable IPC BUSY interrupt */
463 sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
464 SKL_ADSP_REG_HIPCCTL_BUSY, SKL_ADSP_REG_HIPCCTL_BUSY);
465}
466
467bool skl_ipc_int_status(struct sst_dsp *ctx)
468{
469 return sst_dsp_shim_read_unlocked(ctx,
470 SKL_ADSP_REG_ADSPIS) & SKL_ADSPIS_IPC;
471}
472
473int skl_ipc_init(struct device *dev, struct skl_sst *skl)
474{
475 struct sst_generic_ipc *ipc;
476 int err;
477
478 ipc = &skl->ipc;
479 ipc->dsp = skl->dsp;
480 ipc->dev = dev;
481
482 ipc->tx_data_max_size = SKL_ADSP_W1_SZ;
483 ipc->rx_data_max_size = SKL_ADSP_W0_UP_SZ;
484
485 err = sst_ipc_init(ipc);
486 if (err)
487 return err;
488
489 ipc->ops.tx_msg = skl_ipc_tx_msg;
490 ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
491 ipc->ops.is_dsp_busy = skl_ipc_is_dsp_busy;
492
493 return 0;
494}
495
496void skl_ipc_free(struct sst_generic_ipc *ipc)
497{
498 /* Disable IPC DONE interrupt */
499 sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
500 SKL_ADSP_REG_HIPCCTL_DONE, 0);
501
502 /* Disable IPC BUSY interrupt */
503 sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
504 SKL_ADSP_REG_HIPCCTL_BUSY, 0);
505
506 sst_ipc_fini(ipc);
507}
508
509int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
510 u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
511{
512 struct skl_ipc_header header = {0};
513 u64 *ipc_header = (u64 *)(&header);
514 int ret;
515
516 header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
517 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
518 header.primary |= IPC_GLB_TYPE(IPC_GLB_CREATE_PPL);
519 header.primary |= IPC_INSTANCE_ID(instance_id);
520 header.primary |= IPC_PPL_TYPE(ppl_type);
521 header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
522
523 dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
524 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
525 if (ret < 0) {
526 dev_err(ipc->dev, "ipc: create pipeline fail, err: %d\n", ret);
527 return ret;
528 }
529
530 return ret;
531}
532EXPORT_SYMBOL_GPL(skl_ipc_create_pipeline);
533
534int skl_ipc_delete_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
535{
536 struct skl_ipc_header header = {0};
537 u64 *ipc_header = (u64 *)(&header);
538 int ret;
539
540 header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
541 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
542 header.primary |= IPC_GLB_TYPE(IPC_GLB_DELETE_PPL);
543 header.primary |= IPC_INSTANCE_ID(instance_id);
544
545 dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
546 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
547 if (ret < 0) {
548 dev_err(ipc->dev, "ipc: delete pipeline failed, err %d\n", ret);
549 return ret;
550 }
551
552 return 0;
553}
554EXPORT_SYMBOL_GPL(skl_ipc_delete_pipeline);
555
556int skl_ipc_set_pipeline_state(struct sst_generic_ipc *ipc,
557 u8 instance_id, enum skl_ipc_pipeline_state state)
558{
559 struct skl_ipc_header header = {0};
560 u64 *ipc_header = (u64 *)(&header);
561 int ret;
562
563 header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
564 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
565 header.primary |= IPC_GLB_TYPE(IPC_GLB_SET_PPL_STATE);
566 header.primary |= IPC_INSTANCE_ID(instance_id);
567 header.primary |= IPC_PPL_STATE(state);
568
569 dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
570 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
571 if (ret < 0) {
572 dev_err(ipc->dev, "ipc: set pipeline state failed, err: %d\n", ret);
573 return ret;
574 }
575 return ret;
576}
577EXPORT_SYMBOL_GPL(skl_ipc_set_pipeline_state);
578
579int
580skl_ipc_save_pipeline(struct sst_generic_ipc *ipc, u8 instance_id, int dma_id)
581{
582 struct skl_ipc_header header = {0};
583 u64 *ipc_header = (u64 *)(&header);
584 int ret;
585
586 header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
587 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
588 header.primary |= IPC_GLB_TYPE(IPC_GLB_SAVE_PPL);
589 header.primary |= IPC_INSTANCE_ID(instance_id);
590
591 header.extension = IPC_DMA_ID(dma_id);
592 dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
593 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
594 if (ret < 0) {
595 dev_err(ipc->dev, "ipc: save pipeline failed, err: %d\n", ret);
596 return ret;
597 }
598
599 return ret;
600}
601EXPORT_SYMBOL_GPL(skl_ipc_save_pipeline);
602
603int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
604{
605 struct skl_ipc_header header = {0};
606 u64 *ipc_header = (u64 *)(&header);
607 int ret;
608
609 header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
610 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
611 header.primary |= IPC_GLB_TYPE(IPC_GLB_RESTORE_PPL);
612 header.primary |= IPC_INSTANCE_ID(instance_id);
613
614 dev_dbg(ipc->dev, "In %s header=%d\n", __func__, header.primary);
615 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
616 if (ret < 0) {
617 dev_err(ipc->dev, "ipc: restore pipeline failed, err: %d\n", ret);
618 return ret;
619 }
620
621 return ret;
622}
623EXPORT_SYMBOL_GPL(skl_ipc_restore_pipeline);
624
625int skl_ipc_set_dx(struct sst_generic_ipc *ipc, u8 instance_id,
626 u16 module_id, struct skl_ipc_dxstate_info *dx)
627{
628 struct skl_ipc_header header = {0};
629 u64 *ipc_header = (u64 *)(&header);
630 int ret;
631
632 header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
633 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
634 header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_DX);
635 header.primary |= IPC_MOD_INSTANCE_ID(instance_id);
636 header.primary |= IPC_MOD_ID(module_id);
637
638 dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
639 header.primary, header.extension);
640 ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
641			dx, sizeof(*dx), NULL, 0);
642 if (ret < 0) {
643 dev_err(ipc->dev, "ipc: set dx failed, err %d\n", ret);
644 return ret;
645 }
646
647 return ret;
648}
649EXPORT_SYMBOL_GPL(skl_ipc_set_dx);
650
651int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
652 struct skl_ipc_init_instance_msg *msg, void *param_data)
653{
654 struct skl_ipc_header header = {0};
655 u64 *ipc_header = (u64 *)(&header);
656 int ret;
657 u32 *buffer = (u32 *)param_data;
658 /* param_block_size must be in dwords */
659 u16 param_block_size = msg->param_data_size / sizeof(u32);
660
661 print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
662 16, 4, buffer, param_block_size, false);
663
664 header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
665 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
666 header.primary |= IPC_GLB_TYPE(IPC_MOD_INIT_INSTANCE);
667 header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
668 header.primary |= IPC_MOD_ID(msg->module_id);
669
670 header.extension = IPC_CORE_ID(msg->core_id);
671 header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id);
672 header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size);
673
674 dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
675 header.primary, header.extension);
676 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, param_data,
677 msg->param_data_size, NULL, 0);
678
679 if (ret < 0) {
680 dev_err(ipc->dev, "ipc: init instance failed\n");
681 return ret;
682 }
683
684 return ret;
685}
686EXPORT_SYMBOL_GPL(skl_ipc_init_instance);
687
688int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
689 struct skl_ipc_bind_unbind_msg *msg)
690{
691 struct skl_ipc_header header = {0};
692 u64 *ipc_header = (u64 *)(&header);
693 u8 bind_unbind = msg->bind ? IPC_MOD_BIND : IPC_MOD_UNBIND;
694 int ret;
695
696 header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
697 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
698 header.primary |= IPC_GLB_TYPE(bind_unbind);
699 header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
700 header.primary |= IPC_MOD_ID(msg->module_id);
701
702 header.extension = IPC_DST_MOD_ID(msg->dst_module_id);
703 header.extension |= IPC_DST_MOD_INSTANCE_ID(msg->dst_instance_id);
704 header.extension |= IPC_DST_QUEUE(msg->dst_queue);
705 header.extension |= IPC_SRC_QUEUE(msg->src_queue);
706
707 dev_dbg(ipc->dev, "In %s hdr=%x ext=%x\n", __func__, header.primary,
708 header.extension);
709 ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
710 if (ret < 0) {
711 dev_err(ipc->dev, "ipc: bind/unbind faileden");
712 return ret;
713 }
714
715 return ret;
716}
717EXPORT_SYMBOL_GPL(skl_ipc_bind_unbind);
718
719int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
720 struct skl_ipc_large_config_msg *msg, u32 *param)
721{
722 struct skl_ipc_header header = {0};
723 u64 *ipc_header = (u64 *)(&header);
724 int ret = 0;
725 size_t sz_remaining, tx_size, data_offset;
726
727 header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
728 header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
729 header.primary |= IPC_GLB_TYPE(IPC_MOD_LARGE_CONFIG_SET);
730 header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
731 header.primary |= IPC_MOD_ID(msg->module_id);
732
733 header.extension = IPC_DATA_OFFSET_SZ(msg->param_data_size);
734 header.extension |= IPC_LARGE_PARAM_ID(msg->large_param_id);
735 header.extension |= IPC_FINAL_BLOCK(0);
736 header.extension |= IPC_INITIAL_BLOCK(1);
737
738 sz_remaining = msg->param_data_size;
739 data_offset = 0;
740 while (sz_remaining != 0) {
741 tx_size = sz_remaining > SKL_ADSP_W1_SZ
742 ? SKL_ADSP_W1_SZ : sz_remaining;
743 if (tx_size == sz_remaining)
744 header.extension |= IPC_FINAL_BLOCK(1);
745
746 dev_dbg(ipc->dev, "In %s primary=%#x ext=%#x\n", __func__,
747 header.primary, header.extension);
748 dev_dbg(ipc->dev, "transmitting offset: %#x, size: %#x\n",
749 (unsigned)data_offset, (unsigned)tx_size);
750 ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
751 ((char *)param) + data_offset,
752 tx_size, NULL, 0);
753 if (ret < 0) {
754 dev_err(ipc->dev,
755 "ipc: set large config fail, err: %d\n", ret);
756 return ret;
757 }
758 sz_remaining -= tx_size;
759 data_offset = msg->param_data_size - sz_remaining;
760
761 /* clear the fields */
762 header.extension &= IPC_INITIAL_BLOCK_CLEAR;
763 header.extension &= IPC_DATA_OFFSET_SZ_CLEAR;
764 /* fill the fields */
765 header.extension |= IPC_INITIAL_BLOCK(0);
766 header.extension |= IPC_DATA_OFFSET_SZ(data_offset);
767 }
768
769 return ret;
770}
771EXPORT_SYMBOL_GPL(skl_ipc_set_large_config);
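
Note: skl_ipc_set_large_config() above hides the mailbox-window limit from its callers: payloads larger than SKL_ADSP_W1_SZ are sent in chunks, with the IPC_INITIAL_BLOCK/IPC_FINAL_BLOCK flags and data offset updated per chunk. A minimal, hypothetical caller (not part of this patch; the module and parameter IDs below are invented for illustration) therefore passes the whole blob in one call:

#include "skl-sst-ipc.h"

static int example_send_module_params(struct sst_generic_ipc *ipc,
				      u32 *blob, u32 blob_bytes)
{
	struct skl_ipc_large_config_msg msg = {
		.module_id	 = 0x2,	/* hypothetical module ID */
		.instance_id	 = 0,
		.large_param_id	 = 0,	/* hypothetical parameter ID */
		.param_data_size = blob_bytes,
	};

	/*
	 * One call is enough; skl_ipc_set_large_config() splits the blob
	 * into SKL_ADSP_W1_SZ sized chunks internally.
	 */
	return skl_ipc_set_large_config(ipc, &msg, blob);
}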
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
new file mode 100644
index 000000000000..9f5f67202858
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -0,0 +1,125 @@
1/*
2 * Intel SKL IPC Support
3 *
4 * Copyright (C) 2014-15, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as version 2, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16#ifndef __SKL_IPC_H
17#define __SKL_IPC_H
18
19#include <linux/kthread.h>
20#include <linux/irqreturn.h>
21#include "../common/sst-ipc.h"
22
23struct sst_dsp;
24struct skl_sst;
25struct sst_generic_ipc;
26
27enum skl_ipc_pipeline_state {
28 PPL_INVALID_STATE = 0,
29 PPL_UNINITIALIZED = 1,
30 PPL_RESET = 2,
31 PPL_PAUSED = 3,
32 PPL_RUNNING = 4,
33 PPL_ERROR_STOP = 5,
34 PPL_SAVED = 6,
35 PPL_RESTORED = 7
36};
37
38struct skl_ipc_dxstate_info {
39 u32 core_mask;
40 u32 dx_mask;
41};
42
43struct skl_ipc_header {
44 u32 primary;
45 u32 extension;
46};
47
48struct skl_sst {
49 struct device *dev;
50 struct sst_dsp *dsp;
51
52 /* boot */
53 wait_queue_head_t boot_wait;
54 bool boot_complete;
55
56 /* IPC messaging */
57 struct sst_generic_ipc ipc;
58};
59
60struct skl_ipc_init_instance_msg {
61 u32 module_id;
62 u32 instance_id;
63 u16 param_data_size;
64 u8 ppl_instance_id;
65 u8 core_id;
66};
67
68struct skl_ipc_bind_unbind_msg {
69 u32 module_id;
70 u32 instance_id;
71 u32 dst_module_id;
72 u32 dst_instance_id;
73 u8 src_queue;
74 u8 dst_queue;
75 bool bind;
76};
77
78struct skl_ipc_large_config_msg {
79 u32 module_id;
80 u32 instance_id;
81 u32 large_param_id;
82 u32 param_data_size;
83};
84
85#define SKL_IPC_BOOT_MSECS 3000
86
87#define SKL_IPC_D3_MASK 0
88#define SKL_IPC_D0_MASK 3
89
90irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
91
92int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
93 u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
94
95int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
96
97int skl_ipc_set_pipeline_state(struct sst_generic_ipc *sst_ipc,
98 u8 instance_id, enum skl_ipc_pipeline_state state);
99
100int skl_ipc_save_pipeline(struct sst_generic_ipc *ipc,
101 u8 instance_id, int dma_id);
102
103int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
104
105int skl_ipc_init_instance(struct sst_generic_ipc *sst_ipc,
106 struct skl_ipc_init_instance_msg *msg, void *param_data);
107
108int skl_ipc_bind_unbind(struct sst_generic_ipc *sst_ipc,
109 struct skl_ipc_bind_unbind_msg *msg);
110
111int skl_ipc_set_dx(struct sst_generic_ipc *ipc,
112 u8 instance_id, u16 module_id, struct skl_ipc_dxstate_info *dx);
113
114int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
115 struct skl_ipc_large_config_msg *msg, u32 *param);
116
117void skl_ipc_int_enable(struct sst_dsp *dsp);
118void skl_ipc_op_int_enable(struct sst_dsp *ctx);
119void skl_ipc_int_disable(struct sst_dsp *dsp);
120
121bool skl_ipc_int_status(struct sst_dsp *dsp);
122void skl_ipc_free(struct sst_generic_ipc *ipc);
123int skl_ipc_init(struct device *dev, struct skl_sst *skl);
124
125#endif /* __SKL_IPC_H__ ... __SKL_IPC_H */
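
Note: the declarations above are the building blocks for DSP pipeline management over IPC. A minimal sketch of the usual bring-up order (not part of this patch; the pipeline ID, page count and the assumption that the init message carries no parameter blob are invented for illustration):

#include "skl-sst-ipc.h"

static int example_start_pipeline(struct sst_generic_ipc *ipc,
				  struct skl_ipc_init_instance_msg *mod,
				  struct skl_ipc_bind_unbind_msg *link)
{
	int ret;

	/* 1. allocate a pipeline: 2 memory pages, type 0, instance 1 */
	ret = skl_ipc_create_pipeline(ipc, 2, 0, 1);
	if (ret < 0)
		return ret;

	/* 2. instantiate a module; assumes mod->param_data_size == 0 */
	ret = skl_ipc_init_instance(ipc, mod, NULL);
	if (ret < 0)
		return ret;

	/* 3. connect the source and sink queues described in 'link' */
	link->bind = true;
	ret = skl_ipc_bind_unbind(ipc, link);
	if (ret < 0)
		return ret;

	/* 4. start the pipeline */
	return skl_ipc_set_pipeline_state(ipc, 1, PPL_RUNNING);
}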
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
new file mode 100644
index 000000000000..c18ea51b7484
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -0,0 +1,280 @@
1/*
2 * skl-sst.c - HDA DSP library functions for SKL platform
3 *
4 * Copyright (C) 2014-15, Intel Corporation.
5 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
6 * Jeeja KP <jeeja.kp@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#include <linux/module.h>
20#include <linux/delay.h>
21#include <linux/device.h>
22#include "../common/sst-dsp.h"
23#include "../common/sst-dsp-priv.h"
24#include "../common/sst-ipc.h"
25#include "skl-sst-ipc.h"
26
27#define SKL_BASEFW_TIMEOUT 300
28#define SKL_INIT_TIMEOUT 1000
29
30/* Intel HD Audio SRAM Window 0 */
31#define SKL_ADSP_SRAM0_BASE 0x8000
32
33/* Firmware status window */
34#define SKL_ADSP_FW_STATUS SKL_ADSP_SRAM0_BASE
35#define SKL_ADSP_ERROR_CODE (SKL_ADSP_FW_STATUS + 0x4)
36
37#define SKL_INSTANCE_ID 0
38#define SKL_BASE_FW_MODULE_ID 0
39
40static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
41{
42 u32 cur_sts;
43
44 cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
45
46 return (cur_sts == status);
47}
48
49static int skl_transfer_firmware(struct sst_dsp *ctx,
50 const void *basefw, u32 base_fw_size)
51{
52 int ret = 0;
53
54 ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size);
55 if (ret < 0)
56 return ret;
57
58 ret = sst_dsp_register_poll(ctx,
59 SKL_ADSP_FW_STATUS,
60 SKL_FW_STS_MASK,
61 SKL_FW_RFW_START,
62 SKL_BASEFW_TIMEOUT,
63 "Firmware boot");
64
65 ctx->cl_dev.ops.cl_stop_dma(ctx);
66
67 return ret;
68}
69
70static int skl_load_base_firmware(struct sst_dsp *ctx)
71{
72 int ret = 0, i;
73 const struct firmware *fw = NULL;
74 struct skl_sst *skl = ctx->thread_context;
75 u32 reg;
76
77 ret = request_firmware(&fw, "dsp_fw_release.bin", ctx->dev);
78 if (ret < 0) {
79 dev_err(ctx->dev, "Request firmware failed %d\n", ret);
80 skl_dsp_disable_core(ctx);
81 return -EIO;
82 }
83
84 /* enable Interrupt */
85 skl_ipc_int_enable(ctx);
86 skl_ipc_op_int_enable(ctx);
87
88 /* check ROM Status */
89 for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
90 if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
91 dev_dbg(ctx->dev,
92 "ROM loaded, we can continue with FW loading\n");
93 break;
94 }
95 mdelay(1);
96 }
97 if (!i) {
98 reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
99 dev_err(ctx->dev,
100 "Timeout waiting for ROM init done, reg:0x%x\n", reg);
101 ret = -EIO;
102 goto skl_load_base_firmware_failed;
103 }
104
105 ret = skl_transfer_firmware(ctx, fw->data, fw->size);
106 if (ret < 0) {
107 dev_err(ctx->dev, "Transfer firmware failed%d\n", ret);
108 goto skl_load_base_firmware_failed;
109 } else {
110 ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
111 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
112 if (ret == 0) {
113 dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
114 ret = -EIO;
115 goto skl_load_base_firmware_failed;
116 }
117
118 dev_dbg(ctx->dev, "Download firmware successful%d\n", ret);
119 skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
120 }
121 release_firmware(fw);
122
123 return 0;
124
125skl_load_base_firmware_failed:
126 skl_dsp_disable_core(ctx);
127 release_firmware(fw);
128 return ret;
129}
130
131static int skl_set_dsp_D0(struct sst_dsp *ctx)
132{
133 int ret;
134
135 ret = skl_load_base_firmware(ctx);
136 if (ret < 0) {
137 dev_err(ctx->dev, "unable to load firmware\n");
138 return ret;
139 }
140
141 skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
142
143 return ret;
144}
145
146static int skl_set_dsp_D3(struct sst_dsp *ctx)
147{
148 int ret;
149 struct skl_ipc_dxstate_info dx;
150 struct skl_sst *skl = ctx->thread_context;
151
152 dev_dbg(ctx->dev, "In %s:\n", __func__);
153 mutex_lock(&ctx->mutex);
154 if (!is_skl_dsp_running(ctx)) {
155 mutex_unlock(&ctx->mutex);
156 return 0;
157 }
158 mutex_unlock(&ctx->mutex);
159
160 dx.core_mask = SKL_DSP_CORE0_MASK;
161 dx.dx_mask = SKL_IPC_D3_MASK;
162 ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
163 if (ret < 0) {
164 dev_err(ctx->dev, "Failed to set DSP to D3 state\n");
165 return ret;
166 }
167
168 ret = skl_dsp_disable_core(ctx);
169 if (ret < 0) {
170 dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
171 ret = -EIO;
172 }
173 skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
174
175 return ret;
176}
177
178static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
179{
180 return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
181}
182
183static struct skl_dsp_fw_ops skl_fw_ops = {
184 .set_state_D0 = skl_set_dsp_D0,
185 .set_state_D3 = skl_set_dsp_D3,
186 .load_fw = skl_load_base_firmware,
187 .get_fw_errcode = skl_get_errorcode,
188};
189
190static struct sst_ops skl_ops = {
191 .irq_handler = skl_dsp_sst_interrupt,
192 .write = sst_shim32_write,
193 .read = sst_shim32_read,
194 .ram_read = sst_memcpy_fromio_32,
195 .ram_write = sst_memcpy_toio_32,
196 .free = skl_dsp_free,
197};
198
199static struct sst_dsp_device skl_dev = {
200 .thread = skl_dsp_irq_thread_handler,
201 .ops = &skl_ops,
202};
203
204int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
205 struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
206{
207 struct skl_sst *skl;
208 struct sst_dsp *sst;
209 int ret;
210
211 skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
212 if (skl == NULL)
213 return -ENOMEM;
214
215 skl->dev = dev;
216 skl_dev.thread_context = skl;
217
218 skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
219 if (!skl->dsp) {
220 dev_err(skl->dev, "%s: no device\n", __func__);
221 return -ENODEV;
222 }
223
224 sst = skl->dsp;
225
226 sst->addr.lpe = mmio_base;
227 sst->addr.shim = mmio_base;
228 sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
229 SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
230
231 sst->dsp_ops = dsp_ops;
232 sst->fw_ops = skl_fw_ops;
233
234 ret = skl_ipc_init(dev, skl);
235 if (ret)
236 return ret;
237
238 skl->boot_complete = false;
239 init_waitqueue_head(&skl->boot_wait);
240
241 ret = skl_dsp_boot(sst);
242 if (ret < 0) {
243 dev_err(skl->dev, "Boot dsp core failed ret: %d", ret);
244 goto free_ipc;
245 }
246
247 ret = skl_cldma_prepare(sst);
248 if (ret < 0) {
249 dev_err(dev, "CL dma prepare failed : %d", ret);
250 goto free_ipc;
251 }
252
253
254 ret = sst->fw_ops.load_fw(sst);
255 if (ret < 0) {
256 dev_err(dev, "Load base fw failed : %d", ret);
257 return ret;
258 }
259
260 if (dsp)
261 *dsp = skl;
262
263 return 0;
264
265free_ipc:
266 skl_ipc_free(&skl->ipc);
267 return ret;
268}
269EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
270
271void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
272{
273 skl_ipc_free(&ctx->ipc);
274 ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
275 ctx->dsp->ops->free(ctx->dsp);
276}
277EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
278
279MODULE_LICENSE("GPL v2");
280MODULE_DESCRIPTION("Intel Skylake IPC driver");
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
new file mode 100644
index 000000000000..8c7767baa94f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -0,0 +1,286 @@
1/*
2 * skl-topology.h - Intel HDA Platform topology header file
3 *
4 * Copyright (C) 2014-15 Intel Corp
5 * Author: Jeeja KP <jeeja.kp@intel.com>
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 *
19 */
20
21#ifndef __SKL_TOPOLOGY_H__
22#define __SKL_TOPOLOGY_H__
23
24#include <linux/types.h>
25
26#include <sound/hdaudio_ext.h>
27#include <sound/soc.h>
28#include "skl.h"
29#include "skl-tplg-interface.h"
30
31#define BITS_PER_BYTE 8
32#define MAX_TS_GROUPS 8
33#define MAX_DMIC_TS_GROUPS 4
34#define MAX_FIXED_DMIC_PARAMS_SIZE 727
35
36/* Maximum number of coefficients up down mixer module */
37#define UP_DOWN_MIXER_MAX_COEFF 6
38
39enum skl_channel_index {
40 SKL_CHANNEL_LEFT = 0,
41 SKL_CHANNEL_RIGHT = 1,
42 SKL_CHANNEL_CENTER = 2,
43 SKL_CHANNEL_LEFT_SURROUND = 3,
44 SKL_CHANNEL_CENTER_SURROUND = 3,
45 SKL_CHANNEL_RIGHT_SURROUND = 4,
46 SKL_CHANNEL_LFE = 7,
47 SKL_CHANNEL_INVALID = 0xF,
48};
49
50enum skl_bitdepth {
51 SKL_DEPTH_8BIT = 8,
52 SKL_DEPTH_16BIT = 16,
53 SKL_DEPTH_24BIT = 24,
54 SKL_DEPTH_32BIT = 32,
55 SKL_DEPTH_INVALID
56};
57
58enum skl_interleaving {
59 /* [s1_ch1...s1_chN,...,sM_ch1...sM_chN] */
60 SKL_INTERLEAVING_PER_CHANNEL = 0,
61 /* [s1_ch1...sM_ch1,...,s1_chN...sM_chN] */
62 SKL_INTERLEAVING_PER_SAMPLE = 1,
63};
64
65enum skl_s_freq {
66 SKL_FS_8000 = 8000,
67 SKL_FS_11025 = 11025,
68 SKL_FS_12000 = 12000,
69 SKL_FS_16000 = 16000,
70 SKL_FS_22050 = 22050,
71 SKL_FS_24000 = 24000,
72 SKL_FS_32000 = 32000,
73 SKL_FS_44100 = 44100,
74 SKL_FS_48000 = 48000,
75 SKL_FS_64000 = 64000,
76 SKL_FS_88200 = 88200,
77 SKL_FS_96000 = 96000,
78 SKL_FS_128000 = 128000,
79 SKL_FS_176400 = 176400,
80 SKL_FS_192000 = 192000,
81 SKL_FS_INVALID
82};
83
84enum skl_widget_type {
85 SKL_WIDGET_VMIXER = 1,
86 SKL_WIDGET_MIXER = 2,
87 SKL_WIDGET_PGA = 3,
88 SKL_WIDGET_MUX = 4
89};
90
91struct skl_audio_data_format {
92 enum skl_s_freq s_freq;
93 enum skl_bitdepth bit_depth;
94 u32 channel_map;
95 enum skl_ch_cfg ch_cfg;
96 enum skl_interleaving interleaving;
97 u8 number_of_channels;
98 u8 valid_bit_depth;
99 u8 sample_type;
100 u8 reserved[1];
101} __packed;
102
103struct skl_base_cfg {
104 u32 cps;
105 u32 ibs;
106 u32 obs;
107 u32 is_pages;
108 struct skl_audio_data_format audio_fmt;
109};
110
111struct skl_cpr_gtw_cfg {
112 u32 node_id;
113 u32 dma_buffer_size;
114 u32 config_length;
115 /* not mandatory; required only for DMIC/I2S */
116 u32 config_data[1];
117} __packed;
118
119struct skl_cpr_cfg {
120 struct skl_base_cfg base_cfg;
121 struct skl_audio_data_format out_fmt;
122 u32 cpr_feature_mask;
123 struct skl_cpr_gtw_cfg gtw_cfg;
124} __packed;
125
126
127struct skl_src_module_cfg {
128 struct skl_base_cfg base_cfg;
129 enum skl_s_freq src_cfg;
130} __packed;
131
132struct skl_up_down_mixer_cfg {
133 struct skl_base_cfg base_cfg;
134 enum skl_ch_cfg out_ch_cfg;
135 /* This should be set to 1 if user coefficients are required */
136 u32 coeff_sel;
137 /* Pass the user coeff in this array */
138 s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
139} __packed;
140
141enum skl_dma_type {
142 SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0,
143 SKL_DMA_HDA_HOST_INPUT_CLASS = 1,
144 SKL_DMA_HDA_HOST_INOUT_CLASS = 2,
145 SKL_DMA_HDA_LINK_OUTPUT_CLASS = 8,
146 SKL_DMA_HDA_LINK_INPUT_CLASS = 9,
147 SKL_DMA_HDA_LINK_INOUT_CLASS = 0xA,
148 SKL_DMA_DMIC_LINK_INPUT_CLASS = 0xB,
149 SKL_DMA_I2S_LINK_OUTPUT_CLASS = 0xC,
150 SKL_DMA_I2S_LINK_INPUT_CLASS = 0xD,
151};
152
153union skl_ssp_dma_node {
154 u8 val;
155 struct {
156 u8 dual_mono:1;
157 u8 time_slot:3;
158 u8 i2s_instance:4;
159 } dma_node;
160};
161
162union skl_connector_node_id {
163 u32 val;
164 struct {
165 u32 vindex:8;
166 u32 dma_type:4;
167 u32 rsvd:20;
168 } node;
169};
170
171struct skl_module_fmt {
172 u32 channels;
173 u32 s_freq;
174 u32 bit_depth;
175 u32 valid_bit_depth;
176 u32 ch_cfg;
177};
178
179struct skl_module_inst_id {
180 u32 module_id;
181 u32 instance_id;
182};
183
184struct skl_module_pin {
185 struct skl_module_inst_id id;
186 u8 pin_index;
187 bool is_dynamic;
188 bool in_use;
189};
190
191struct skl_specific_cfg {
192 u32 caps_size;
193 u32 *caps;
194};
195
196enum skl_pipe_state {
197 SKL_PIPE_INVALID = 0,
198 SKL_PIPE_CREATED = 1,
199 SKL_PIPE_PAUSED = 2,
200 SKL_PIPE_STARTED = 3
201};
202
203struct skl_pipe_module {
204 struct snd_soc_dapm_widget *w;
205 struct list_head node;
206};
207
208struct skl_pipe_params {
209 u8 host_dma_id;
210 u8 link_dma_id;
211 u32 ch;
212 u32 s_freq;
213 u32 s_fmt;
214 u8 linktype;
215 int stream;
216};
217
218struct skl_pipe {
219 u8 ppl_id;
220 u8 pipe_priority;
221 u16 conn_type;
222 u32 memory_pages;
223 struct skl_pipe_params *p_params;
224 enum skl_pipe_state state;
225 struct list_head w_list;
226};
227
228enum skl_module_state {
229 SKL_MODULE_UNINIT = 0,
230 SKL_MODULE_INIT_DONE = 1,
231 SKL_MODULE_LOADED = 2,
232 SKL_MODULE_UNLOADED = 3,
233 SKL_MODULE_BIND_DONE = 4
234};
235
236struct skl_module_cfg {
237 struct skl_module_inst_id id;
238 struct skl_module_fmt in_fmt;
239 struct skl_module_fmt out_fmt;
240 u8 max_in_queue;
241 u8 max_out_queue;
242 u8 in_queue_mask;
243 u8 out_queue_mask;
244 u8 in_queue;
245 u8 out_queue;
246 u32 mcps;
247 u32 ibs;
248 u32 obs;
249 u8 is_loadable;
250 u8 core_id;
251 u8 dev_type;
252 u8 dma_id;
253 u8 time_slot;
254 u32 params_fixup;
255 u32 converter;
256 u32 vbus_id;
257 struct skl_module_pin *m_in_pin;
258 struct skl_module_pin *m_out_pin;
259 enum skl_module_type m_type;
260 enum skl_hw_conn_type hw_conn_type;
261 enum skl_module_state m_state;
262 struct skl_pipe *pipe;
263 struct skl_specific_cfg formats_config;
264};
265
266int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
267
268int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
269
270int skl_pause_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
271
272int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
273
274int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
275
276int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config,
277 char *param);
278
279int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
280 *src_module, struct skl_module_cfg *dst_module);
281
282int skl_unbind_modules(struct skl_sst *ctx, struct skl_module_cfg
283 *src_module, struct skl_module_cfg *dst_module);
284
285enum skl_bitdepth skl_get_bit_depth(int params);
286#endif
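
Note: the prototypes above suggest the expected life cycle of a pipeline: create it, initialize its modules, bind them, then run. A minimal hypothetical sketch (not part of this patch; error unwinding via skl_stop_pipe()/skl_unbind_modules()/skl_delete_pipe() is omitted for brevity):

#include "skl-topology.h"

static int example_bring_up(struct skl_sst *ctx, struct skl_pipe *pipe,
			    struct skl_module_cfg *src,
			    struct skl_module_cfg *sink)
{
	int ret;

	ret = skl_create_pipeline(ctx, pipe);
	if (ret < 0)
		return ret;

	/* modules initialized without an extra parameter blob */
	ret = skl_init_module(ctx, src, NULL);
	if (!ret)
		ret = skl_init_module(ctx, sink, NULL);
	if (!ret)
		ret = skl_bind_modules(ctx, src, sink);
	if (!ret)
		ret = skl_run_pipe(ctx, pipe);

	return ret;
}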
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
new file mode 100644
index 000000000000..a50689825bca
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -0,0 +1,88 @@
1/*
2 * skl-tplg-interface.h - Intel DSP FW private data interface
3 *
4 * Copyright (C) 2015 Intel Corp
5 * Author: Jeeja KP <jeeja.kp@intel.com>
6 * Nilofer, Samreen <samreen.nilofer@intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#ifndef __HDA_TPLG_INTERFACE_H__
20#define __HDA_TPLG_INTERFACE_H__
21
22/**
23 * enum skl_ch_cfg - channel configuration
24 *
25 * @SKL_CH_CFG_MONO: One channel only
26 * @SKL_CH_CFG_STEREO: L & R
27 * @SKL_CH_CFG_2_1: L, R & LFE
28 * @SKL_CH_CFG_3_0: L, C & R
29 * @SKL_CH_CFG_3_1: L, C, R & LFE
30 * @SKL_CH_CFG_QUATRO: L, R, Ls & Rs
31 * @SKL_CH_CFG_4_0: L, C, R & Cs
32 * @SKL_CH_CFG_5_0: L, C, R, Ls & Rs
33 * @SKL_CH_CFG_5_1: L, C, R, Ls, Rs & LFE
34 * @SKL_CH_CFG_DUAL_MONO: One channel replicated in two
35 * @SKL_CH_CFG_I2S_DUAL_STEREO_0: Stereo(L,R) in 4 slots, 1st stream:[ L, R, -, - ]
36 * @SKL_CH_CFG_I2S_DUAL_STEREO_1: Stereo(L,R) in 4 slots, 2nd stream:[ -, -, L, R ]
37 * @SKL_CH_CFG_INVALID: Invalid
38 */
39enum skl_ch_cfg {
40 SKL_CH_CFG_MONO = 0,
41 SKL_CH_CFG_STEREO = 1,
42 SKL_CH_CFG_2_1 = 2,
43 SKL_CH_CFG_3_0 = 3,
44 SKL_CH_CFG_3_1 = 4,
45 SKL_CH_CFG_QUATRO = 5,
46 SKL_CH_CFG_4_0 = 6,
47 SKL_CH_CFG_5_0 = 7,
48 SKL_CH_CFG_5_1 = 8,
49 SKL_CH_CFG_DUAL_MONO = 9,
50 SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
51 SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
52 SKL_CH_CFG_INVALID
53};
54
55enum skl_module_type {
56 SKL_MODULE_TYPE_MIXER = 0,
57 SKL_MODULE_TYPE_COPIER,
58 SKL_MODULE_TYPE_UPDWMIX,
59 SKL_MODULE_TYPE_SRCINT
60};
61
62enum skl_core_affinity {
63 SKL_AFFINITY_CORE_0 = 0,
64 SKL_AFFINITY_CORE_1,
65 SKL_AFFINITY_CORE_MAX
66};
67
68enum skl_pipe_conn_type {
69 SKL_PIPE_CONN_TYPE_NONE = 0,
70 SKL_PIPE_CONN_TYPE_FE,
71 SKL_PIPE_CONN_TYPE_BE
72};
73
74enum skl_hw_conn_type {
75 SKL_CONN_NONE = 0,
76 SKL_CONN_SOURCE = 1,
77 SKL_CONN_SINK = 2
78};
79
80enum skl_dev_type {
81 SKL_DEVICE_BT = 0x0,
82 SKL_DEVICE_DMIC = 0x1,
83 SKL_DEVICE_I2S = 0x2,
84 SKL_DEVICE_SLIMBUS = 0x3,
85 SKL_DEVICE_HDALINK = 0x4,
86 SKL_DEVICE_NONE
87};
88#endif
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
new file mode 100644
index 000000000000..348d094e81d6
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.c
@@ -0,0 +1,536 @@
1/*
2 * skl.c - Implementation of ASoC Intel SKL HD Audio driver
3 *
4 * Copyright (C) 2014-2015 Intel Corp
5 * Author: Jeeja KP <jeeja.kp@intel.com>
6 *
7 * Derived mostly from Intel HDA driver with following copyrights:
8 * Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
9 * PeiSen Hou <pshou@realtek.com.tw>
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; version 2 of the License.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
22 */
23
24#include <linux/module.h>
25#include <linux/pci.h>
26#include <linux/pm_runtime.h>
27#include <linux/platform_device.h>
28#include <sound/pcm.h>
29#include "skl.h"
30
31/*
32 * initialize the PCI registers
33 */
34static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
35 unsigned char mask, unsigned char val)
36{
37 unsigned char data;
38
39 pci_read_config_byte(pci, reg, &data);
40 data &= ~mask;
41 data |= (val & mask);
42 pci_write_config_byte(pci, reg, data);
43}
44
45static void skl_init_pci(struct skl *skl)
46{
47 struct hdac_ext_bus *ebus = &skl->ebus;
48
49 /*
50 * Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
51 * TCSEL == Traffic Class Select Register, which sets PCI express QOS
52 * Ensuring these bits are 0 clears playback static on some HD Audio
53 * codecs.
54 * The PCI register TCSEL is defined in the Intel manuals.
55 */
56 dev_dbg(ebus_to_hbus(ebus)->dev, "Clearing TCSEL\n");
57 skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
58}
59
60/* called from IRQ */
61static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
62{
63 snd_pcm_period_elapsed(hstr->substream);
64}
65
66static irqreturn_t skl_interrupt(int irq, void *dev_id)
67{
68 struct hdac_ext_bus *ebus = dev_id;
69 struct hdac_bus *bus = ebus_to_hbus(ebus);
70 u32 status;
71
72 if (!pm_runtime_active(bus->dev))
73 return IRQ_NONE;
74
75 spin_lock(&bus->reg_lock);
76
77 status = snd_hdac_chip_readl(bus, INTSTS);
78 if (status == 0 || status == 0xffffffff) {
79 spin_unlock(&bus->reg_lock);
80 return IRQ_NONE;
81 }
82
83 /* clear rirb int */
84 status = snd_hdac_chip_readb(bus, RIRBSTS);
85 if (status & RIRB_INT_MASK) {
86 if (status & RIRB_INT_RESPONSE)
87 snd_hdac_bus_update_rirb(bus);
88 snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
89 }
90
91 spin_unlock(&bus->reg_lock);
92
93 return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
94}
95
96static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
97{
98 struct hdac_ext_bus *ebus = dev_id;
99 struct hdac_bus *bus = ebus_to_hbus(ebus);
100 u32 status;
101
102 status = snd_hdac_chip_readl(bus, INTSTS);
103
104 snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update);
105
106 return IRQ_HANDLED;
107}
108
109static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
110{
111 struct skl *skl = ebus_to_skl(ebus);
112 struct hdac_bus *bus = ebus_to_hbus(ebus);
113 int ret;
114
115 ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
116 skl_threaded_handler,
117 IRQF_SHARED,
118 KBUILD_MODNAME, ebus);
119 if (ret) {
120 dev_err(bus->dev,
121 "unable to grab IRQ %d, disabling device\n",
122 skl->pci->irq);
123 return ret;
124 }
125
126 bus->irq = skl->pci->irq;
127 pci_intx(skl->pci, 1);
128
129 return 0;
130}
131
132#ifdef CONFIG_PM_SLEEP
133/*
134 * power management
135 */
136static int skl_suspend(struct device *dev)
137{
138 struct pci_dev *pci = to_pci_dev(dev);
139 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
140 struct hdac_bus *bus = ebus_to_hbus(ebus);
141
142 snd_hdac_bus_stop_chip(bus);
143 snd_hdac_bus_enter_link_reset(bus);
144
145 return 0;
146}
147
148static int skl_resume(struct device *dev)
149{
150 struct pci_dev *pci = to_pci_dev(dev);
151 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
152 struct hdac_bus *bus = ebus_to_hbus(ebus);
153 struct skl *hda = ebus_to_skl(ebus);
154
155 skl_init_pci(hda);
156
157 snd_hdac_bus_init_chip(bus, 1);
158
159 return 0;
160}
161#endif /* CONFIG_PM_SLEEP */
162
163#ifdef CONFIG_PM
164static int skl_runtime_suspend(struct device *dev)
165{
166 struct pci_dev *pci = to_pci_dev(dev);
167 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
168 struct hdac_bus *bus = ebus_to_hbus(ebus);
169
170 dev_dbg(bus->dev, "in %s\n", __func__);
171
172 /* enable controller wake up event */
173 snd_hdac_chip_updatew(bus, WAKEEN, 0, STATESTS_INT_MASK);
174
175 snd_hdac_bus_stop_chip(bus);
176 snd_hdac_bus_enter_link_reset(bus);
177
178 return 0;
179}
180
181static int skl_runtime_resume(struct device *dev)
182{
183 struct pci_dev *pci = to_pci_dev(dev);
184 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
185 struct hdac_bus *bus = ebus_to_hbus(ebus);
186 struct skl *hda = ebus_to_skl(ebus);
187 int status;
188
189 dev_dbg(bus->dev, "in %s\n", __func__);
190
191 /* Read STATESTS before controller reset */
192 status = snd_hdac_chip_readw(bus, STATESTS);
193
194 skl_init_pci(hda);
195 snd_hdac_bus_init_chip(bus, true);
196 /* disable controller Wake Up event */
197 snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
198
199 return 0;
200}
201#endif /* CONFIG_PM */
202
203static const struct dev_pm_ops skl_pm = {
204 SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
205 SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
206};
207
208/*
209 * destructor
210 */
211static int skl_free(struct hdac_ext_bus *ebus)
212{
213 struct skl *skl = ebus_to_skl(ebus);
214 struct hdac_bus *bus = ebus_to_hbus(ebus);
215
216 skl->init_failed = 1; /* to be sure */
217
218 snd_hdac_ext_stop_streams(ebus);
219
220 if (bus->irq >= 0)
221 free_irq(bus->irq, (void *)ebus);
222 if (bus->remap_addr)
223 iounmap(bus->remap_addr);
224
225 snd_hdac_bus_free_stream_pages(bus);
226 snd_hdac_stream_free_all(ebus);
227 snd_hdac_link_free_all(ebus);
228 pci_release_regions(skl->pci);
229 pci_disable_device(skl->pci);
230
231 snd_hdac_ext_bus_exit(ebus);
232
233 return 0;
234}
235
236static int skl_dmic_device_register(struct skl *skl)
237{
238 struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
239 struct platform_device *pdev;
240 int ret;
241
242 /* SKL has one dmic port, so allocate dmic device for this */
243 pdev = platform_device_alloc("dmic-codec", -1);
244 if (!pdev) {
245 dev_err(bus->dev, "failed to allocate dmic device\n");
246 return -ENOMEM;
247 }
248
249 ret = platform_device_add(pdev);
250 if (ret) {
251 dev_err(bus->dev, "failed to add dmic device: %d\n", ret);
252 platform_device_put(pdev);
253 return ret;
254 }
255 skl->dmic_dev = pdev;
256
257 return 0;
258}
259
260static void skl_dmic_device_unregister(struct skl *skl)
261{
262 if (skl->dmic_dev)
263 platform_device_unregister(skl->dmic_dev);
264}
265
266/*
267 * Probe the given codec address
268 */
269static int probe_codec(struct hdac_ext_bus *ebus, int addr)
270{
271 struct hdac_bus *bus = ebus_to_hbus(ebus);
272 unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
273 (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
274 unsigned int res;
275
276 mutex_lock(&bus->cmd_mutex);
277 snd_hdac_bus_send_cmd(bus, cmd);
278 snd_hdac_bus_get_response(bus, addr, &res);
279 mutex_unlock(&bus->cmd_mutex);
280 if (res == -1)
281 return -EIO;
282 dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
283
284 return snd_hdac_ext_bus_device_init(ebus, addr);
285}
286
287/* Codec initialization */
288static int skl_codec_create(struct hdac_ext_bus *ebus)
289{
290 struct hdac_bus *bus = ebus_to_hbus(ebus);
291 int c, max_slots;
292
293 max_slots = HDA_MAX_CODECS;
294
295 /* First try to probe all given codec slots */
296 for (c = 0; c < max_slots; c++) {
297 if ((bus->codec_mask & (1 << c))) {
298 if (probe_codec(ebus, c) < 0) {
299 /*
300 * Some BIOSen give you wrong codec addresses
301 * that don't exist
302 */
303 dev_warn(bus->dev,
304 "Codec #%d probe error; disabling it...\n", c);
305 bus->codec_mask &= ~(1 << c);
306 /*
307 * Worse, accessing a non-existent
308 * codec often screws up the controller bus
309 * and disturbs further communication.
310 * Thus if an error occurs during probing,
311 * it is better to reset the controller bus
312 * to get back to a sane state.
313 */
314 snd_hdac_bus_stop_chip(bus);
315 snd_hdac_bus_init_chip(bus, true);
316 }
317 }
318 }
319
320 return 0;
321}
322
323static const struct hdac_bus_ops bus_core_ops = {
324 .command = snd_hdac_bus_send_cmd,
325 .get_response = snd_hdac_bus_get_response,
326};
327
328/*
329 * constructor
330 */
331static int skl_create(struct pci_dev *pci,
332 const struct hdac_io_ops *io_ops,
333 struct skl **rskl)
334{
335 struct skl *skl;
336 struct hdac_ext_bus *ebus;
337
338 int err;
339
340 *rskl = NULL;
341
342 err = pci_enable_device(pci);
343 if (err < 0)
344 return err;
345
346 skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
347 if (!skl) {
348 pci_disable_device(pci);
349 return -ENOMEM;
350 }
351 ebus = &skl->ebus;
352 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
353 ebus->bus.use_posbuf = 1;
354 skl->pci = pci;
355
356 ebus->bus.bdl_pos_adj = 0;
357
358 *rskl = skl;
359
360 return 0;
361}
362
363static int skl_first_init(struct hdac_ext_bus *ebus)
364{
365 struct skl *skl = ebus_to_skl(ebus);
366 struct hdac_bus *bus = ebus_to_hbus(ebus);
367 struct pci_dev *pci = skl->pci;
368 int err;
369 unsigned short gcap;
370 int cp_streams, pb_streams, start_idx;
371
372 err = pci_request_regions(pci, "Skylake HD audio");
373 if (err < 0)
374 return err;
375
376 bus->addr = pci_resource_start(pci, 0);
377 bus->remap_addr = pci_ioremap_bar(pci, 0);
378 if (bus->remap_addr == NULL) {
379 dev_err(bus->dev, "ioremap error\n");
380 return -ENXIO;
381 }
382
383 snd_hdac_ext_bus_parse_capabilities(ebus);
384
385 if (skl_acquire_irq(ebus, 0) < 0)
386 return -EBUSY;
387
388 pci_set_master(pci);
389 synchronize_irq(bus->irq);
390
391 gcap = snd_hdac_chip_readw(bus, GCAP);
392 dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);
393
394 /* allow 64bit DMA address if supported by H/W */
395 if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
396 dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
397 } else {
398 dma_set_mask(bus->dev, DMA_BIT_MASK(32));
399 dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
400 }
401
402 /* read number of streams from GCAP register */
403 cp_streams = (gcap >> 8) & 0x0f;
404 pb_streams = (gcap >> 12) & 0x0f;
405
406 if (!pb_streams && !cp_streams)
407 return -EIO;
408
409 ebus->num_streams = cp_streams + pb_streams;
410
411 /* initialize streams */
412 snd_hdac_ext_stream_init_all
413 (ebus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
414 start_idx = cp_streams;
415 snd_hdac_ext_stream_init_all
416 (ebus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
417
418 err = snd_hdac_bus_alloc_stream_pages(bus);
419 if (err < 0)
420 return err;
421
422 /* initialize chip */
423 skl_init_pci(skl);
424
425 snd_hdac_bus_init_chip(bus, true);
426
427 /* codec detection */
428 if (!bus->codec_mask) {
429 dev_err(bus->dev, "no codecs found!\n");
430 return -ENODEV;
431 }
432
433 return 0;
434}
435
436static int skl_probe(struct pci_dev *pci,
437 const struct pci_device_id *pci_id)
438{
439 struct skl *skl;
440 struct hdac_ext_bus *ebus = NULL;
441 struct hdac_bus *bus = NULL;
442 int err;
443
444 /* we use ext core ops, so provide NULL for ops here */
445 err = skl_create(pci, NULL, &skl);
446 if (err < 0)
447 return err;
448
449 ebus = &skl->ebus;
450 bus = ebus_to_hbus(ebus);
451
452 err = skl_first_init(ebus);
453 if (err < 0)
454 goto out_free;
455
456 pci_set_drvdata(skl->pci, ebus);
457
458 /* check if dsp is there */
459 if (ebus->ppcap) {
460 /* TODO register with dsp IPC */
461 dev_dbg(bus->dev, "Register dsp\n");
462 }
463
464 if (ebus->mlcap)
465 snd_hdac_ext_bus_get_ml_capabilities(ebus);
466
467 /* create device for soc dmic */
468 err = skl_dmic_device_register(skl);
469 if (err < 0)
470 goto out_free;
471
472 /* register platform dai and controls */
473 err = skl_platform_register(bus->dev);
474 if (err < 0)
475 goto out_dmic_free;
476
477 /* create codec instances */
478 err = skl_codec_create(ebus);
479 if (err < 0)
480 goto out_unregister;
481
482 /* configure PM */
483 pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
484 pm_runtime_use_autosuspend(bus->dev);
485 pm_runtime_put_noidle(bus->dev);
486 pm_runtime_allow(bus->dev);
487
488 return 0;
489
490out_unregister:
491 skl_platform_unregister(bus->dev);
492out_dmic_free:
493 skl_dmic_device_unregister(skl);
494out_free:
495 skl->init_failed = 1;
496 skl_free(ebus);
497
498 return err;
499}
500
501static void skl_remove(struct pci_dev *pci)
502{
503 struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
504 struct skl *skl = ebus_to_skl(ebus);
505
506 if (pci_dev_run_wake(pci))
507 pm_runtime_get_noresume(&pci->dev);
508 pci_dev_put(pci);
509 skl_platform_unregister(&pci->dev);
510 skl_dmic_device_unregister(skl);
511 skl_free(ebus);
512 dev_set_drvdata(&pci->dev, NULL);
513}
514
515/* PCI IDs */
516static const struct pci_device_id skl_ids[] = {
517 /* Sunrise Point-LP */
518 { PCI_DEVICE(0x8086, 0x9d70), 0},
519 { 0, }
520};
521MODULE_DEVICE_TABLE(pci, skl_ids);
522
523/* pci_driver definition */
524static struct pci_driver skl_driver = {
525 .name = KBUILD_MODNAME,
526 .id_table = skl_ids,
527 .probe = skl_probe,
528 .remove = skl_remove,
529 .driver = {
530 .pm = &skl_pm,
531 },
532};
533module_pci_driver(skl_driver);
534
535MODULE_LICENSE("GPL v2");
536MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
new file mode 100644
index 000000000000..f7fdbb02947f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.h
@@ -0,0 +1,84 @@
1/*
2 * skl.h - HD Audio skylake defintions.
3 *
4 * Copyright (C) 2015 Intel Corp
5 * Author: Jeeja KP <jeeja.kp@intel.com>
6 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 *
19 */
20
21#ifndef __SOUND_SOC_SKL_H
22#define __SOUND_SOC_SKL_H
23
24#include <sound/hda_register.h>
25#include <sound/hdaudio_ext.h>
26#include "skl-nhlt.h"
27
28#define SKL_SUSPEND_DELAY 2000
29
30/* Vendor Specific Registers */
31#define AZX_REG_VS_EM1 0x1000
32#define AZX_REG_VS_INRC 0x1004
33#define AZX_REG_VS_OUTRC 0x1008
34#define AZX_REG_VS_FIFOTRK 0x100C
35#define AZX_REG_VS_FIFOTRK2 0x1010
36#define AZX_REG_VS_EM2 0x1030
37#define AZX_REG_VS_EM3L 0x1038
38#define AZX_REG_VS_EM3U 0x103C
39#define AZX_REG_VS_EM4L 0x1040
40#define AZX_REG_VS_EM4U 0x1044
41#define AZX_REG_VS_LTRC 0x1048
42#define AZX_REG_VS_D0I3C 0x104A
43#define AZX_REG_VS_PCE 0x104B
44#define AZX_REG_VS_L2MAGC 0x1050
45#define AZX_REG_VS_L2LAHPT 0x1054
46#define AZX_REG_VS_SDXDPIB_XBASE 0x1084
47#define AZX_REG_VS_SDXDPIB_XINTERVAL 0x20
48#define AZX_REG_VS_SDXEFIFOS_XBASE 0x1094
49#define AZX_REG_VS_SDXEFIFOS_XINTERVAL 0x20
50
51struct skl {
52 struct hdac_ext_bus ebus;
53 struct pci_dev *pci;
54
55 unsigned int init_failed:1; /* delayed init failed */
56 struct platform_device *dmic_dev;
57
58 void __iomem *nhlt; /* nhlt ptr */
59 struct skl_sst *skl_sst; /* sst skl ctx */
60};
61
62#define skl_to_ebus(s) (&(s)->ebus)
63#define ebus_to_skl(sbus) \
64 container_of(sbus, struct skl, ebus)
65
66/* to pass dai dma data */
67struct skl_dma_params {
68 u32 format;
69 u8 stream_tag;
70};
71
72int skl_platform_unregister(struct device *dev);
73int skl_platform_register(struct device *dev);
74
75void __iomem *skl_nhlt_init(struct device *dev);
76void skl_nhlt_free(void __iomem *addr);
77struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
78 u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
79
80int skl_init_dsp(struct skl *skl);
81void skl_free_dsp(struct skl *skl);
82int skl_suspend_dsp(struct skl *skl);
83int skl_resume_dsp(struct skl *skl);
84#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 4cf2245950d7..dbfdfe99c69d 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -148,10 +148,14 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
148 dram = mv_mbus_dram_info(); 148 dram = mv_mbus_dram_info();
149 addr = substream->dma_buffer.addr; 149 addr = substream->dma_buffer.addr;
150 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 150 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
151 if (priv->substream_play)
152 return -EBUSY;
151 priv->substream_play = substream; 153 priv->substream_play = substream;
152 kirkwood_dma_conf_mbus_windows(priv->io, 154 kirkwood_dma_conf_mbus_windows(priv->io,
153 KIRKWOOD_PLAYBACK_WIN, addr, dram); 155 KIRKWOOD_PLAYBACK_WIN, addr, dram);
154 } else { 156 } else {
157 if (priv->substream_rec)
158 return -EBUSY;
155 priv->substream_rec = substream; 159 priv->substream_rec = substream;
156 kirkwood_dma_conf_mbus_windows(priv->io, 160 kirkwood_dma_conf_mbus_windows(priv->io,
157 KIRKWOOD_RECORD_WIN, addr, dram); 161 KIRKWOOD_RECORD_WIN, addr, dram);
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 4d44b5803e55..684e8a78bed0 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
103 .name = "MAX98090 Playback", 103 .name = "MAX98090 Playback",
104 .stream_name = "MAX98090 Playback", 104 .stream_name = "MAX98090 Playback",
105 .cpu_dai_name = "DL1", 105 .cpu_dai_name = "DL1",
106 .platform_name = "11220000.mt8173-afe-pcm",
107 .codec_name = "snd-soc-dummy", 106 .codec_name = "snd-soc-dummy",
108 .codec_dai_name = "snd-soc-dummy-dai", 107 .codec_dai_name = "snd-soc-dummy-dai",
109 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 108 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
114 .name = "MAX98090 Capture", 113 .name = "MAX98090 Capture",
115 .stream_name = "MAX98090 Capture", 114 .stream_name = "MAX98090 Capture",
116 .cpu_dai_name = "VUL", 115 .cpu_dai_name = "VUL",
117 .platform_name = "11220000.mt8173-afe-pcm",
118 .codec_name = "snd-soc-dummy", 116 .codec_name = "snd-soc-dummy",
119 .codec_dai_name = "snd-soc-dummy-dai", 117 .codec_dai_name = "snd-soc-dummy-dai",
120 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 118 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
125 { 123 {
126 .name = "Codec", 124 .name = "Codec",
127 .cpu_dai_name = "I2S", 125 .cpu_dai_name = "I2S",
128 .platform_name = "11220000.mt8173-afe-pcm",
129 .no_pcm = 1, 126 .no_pcm = 1,
130 .codec_dai_name = "HiFi", 127 .codec_dai_name = "HiFi",
131 .init = mt8173_max98090_init, 128 .init = mt8173_max98090_init,
@@ -139,6 +136,7 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
139 136
140static struct snd_soc_card mt8173_max98090_card = { 137static struct snd_soc_card mt8173_max98090_card = {
141 .name = "mt8173-max98090", 138 .name = "mt8173-max98090",
139 .owner = THIS_MODULE,
142 .dai_link = mt8173_max98090_dais, 140 .dai_link = mt8173_max98090_dais,
143 .num_links = ARRAY_SIZE(mt8173_max98090_dais), 141 .num_links = ARRAY_SIZE(mt8173_max98090_dais),
144 .controls = mt8173_max98090_controls, 142 .controls = mt8173_max98090_controls,
@@ -152,9 +150,21 @@ static struct snd_soc_card mt8173_max98090_card = {
152static int mt8173_max98090_dev_probe(struct platform_device *pdev) 150static int mt8173_max98090_dev_probe(struct platform_device *pdev)
153{ 151{
154 struct snd_soc_card *card = &mt8173_max98090_card; 152 struct snd_soc_card *card = &mt8173_max98090_card;
155 struct device_node *codec_node; 153 struct device_node *codec_node, *platform_node;
156 int ret, i; 154 int ret, i;
157 155
156 platform_node = of_parse_phandle(pdev->dev.of_node,
157 "mediatek,platform", 0);
158 if (!platform_node) {
159 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
160 return -EINVAL;
161 }
162 for (i = 0; i < card->num_links; i++) {
163 if (mt8173_max98090_dais[i].platform_name)
164 continue;
165 mt8173_max98090_dais[i].platform_of_node = platform_node;
166 }
167
158 codec_node = of_parse_phandle(pdev->dev.of_node, 168 codec_node = of_parse_phandle(pdev->dev.of_node,
159 "mediatek,audio-codec", 0); 169 "mediatek,audio-codec", 0);
160 if (!codec_node) { 170 if (!codec_node) {
@@ -193,7 +203,6 @@ MODULE_DEVICE_TABLE(of, mt8173_max98090_dt_match);
193static struct platform_driver mt8173_max98090_driver = { 203static struct platform_driver mt8173_max98090_driver = {
194 .driver = { 204 .driver = {
195 .name = "mt8173-max98090", 205 .name = "mt8173-max98090",
196 .owner = THIS_MODULE,
197 .of_match_table = mt8173_max98090_dt_match, 206 .of_match_table = mt8173_max98090_dt_match,
198#ifdef CONFIG_PM 207#ifdef CONFIG_PM
199 .pm = &snd_soc_pm_ops, 208 .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 094055323059..86cf9752f18a 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
138 .name = "rt5650_rt5676 Playback", 138 .name = "rt5650_rt5676 Playback",
139 .stream_name = "rt5650_rt5676 Playback", 139 .stream_name = "rt5650_rt5676 Playback",
140 .cpu_dai_name = "DL1", 140 .cpu_dai_name = "DL1",
141 .platform_name = "11220000.mt8173-afe-pcm",
142 .codec_name = "snd-soc-dummy", 141 .codec_name = "snd-soc-dummy",
143 .codec_dai_name = "snd-soc-dummy-dai", 142 .codec_dai_name = "snd-soc-dummy-dai",
144 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 143 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
149 .name = "rt5650_rt5676 Capture", 148 .name = "rt5650_rt5676 Capture",
150 .stream_name = "rt5650_rt5676 Capture", 149 .stream_name = "rt5650_rt5676 Capture",
151 .cpu_dai_name = "VUL", 150 .cpu_dai_name = "VUL",
152 .platform_name = "11220000.mt8173-afe-pcm",
153 .codec_name = "snd-soc-dummy", 151 .codec_name = "snd-soc-dummy",
154 .codec_dai_name = "snd-soc-dummy-dai", 152 .codec_dai_name = "snd-soc-dummy-dai",
155 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 153 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
161 { 159 {
162 .name = "Codec", 160 .name = "Codec",
163 .cpu_dai_name = "I2S", 161 .cpu_dai_name = "I2S",
164 .platform_name = "11220000.mt8173-afe-pcm",
165 .no_pcm = 1, 162 .no_pcm = 1,
166 .codecs = mt8173_rt5650_rt5676_codecs, 163 .codecs = mt8173_rt5650_rt5676_codecs,
167 .num_codecs = 2, 164 .num_codecs = 2,
@@ -194,6 +191,7 @@ static struct snd_soc_codec_conf mt8173_rt5650_rt5676_codec_conf[] = {
194 191
195static struct snd_soc_card mt8173_rt5650_rt5676_card = { 192static struct snd_soc_card mt8173_rt5650_rt5676_card = {
196 .name = "mtk-rt5650-rt5676", 193 .name = "mtk-rt5650-rt5676",
194 .owner = THIS_MODULE,
197 .dai_link = mt8173_rt5650_rt5676_dais, 195 .dai_link = mt8173_rt5650_rt5676_dais,
198 .num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais), 196 .num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais),
199 .codec_conf = mt8173_rt5650_rt5676_codec_conf, 197 .codec_conf = mt8173_rt5650_rt5676_codec_conf,
@@ -209,7 +207,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
209static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) 207static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
210{ 208{
211 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card; 209 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
212 int ret; 210 struct device_node *platform_node;
211 int i, ret;
212
213 platform_node = of_parse_phandle(pdev->dev.of_node,
214 "mediatek,platform", 0);
215 if (!platform_node) {
216 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
217 return -EINVAL;
218 }
219
220 for (i = 0; i < card->num_links; i++) {
221 if (mt8173_rt5650_rt5676_dais[i].platform_name)
222 continue;
223 mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
224 }
213 225
214 mt8173_rt5650_rt5676_codecs[0].of_node = 226 mt8173_rt5650_rt5676_codecs[0].of_node =
215 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0); 227 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
@@ -258,7 +270,6 @@ MODULE_DEVICE_TABLE(of, mt8173_rt5650_rt5676_dt_match);
258static struct platform_driver mt8173_rt5650_rt5676_driver = { 270static struct platform_driver mt8173_rt5650_rt5676_driver = {
259 .driver = { 271 .driver = {
260 .name = "mtk-rt5650-rt5676", 272 .name = "mtk-rt5650-rt5676",
261 .owner = THIS_MODULE,
262 .of_match_table = mt8173_rt5650_rt5676_dt_match, 273 .of_match_table = mt8173_rt5650_rt5676_dt_match,
263#ifdef CONFIG_PM 274#ifdef CONFIG_PM
264 .pm = &snd_soc_pm_ops, 275 .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mtk-afe-common.h b/sound/soc/mediatek/mtk-afe-common.h
index a88b17511fdf..cc4393cb1130 100644
--- a/sound/soc/mediatek/mtk-afe-common.h
+++ b/sound/soc/mediatek/mtk-afe-common.h
@@ -98,12 +98,4 @@ struct mtk_afe_memif {
98 const struct mtk_afe_irq_data *irqdata; 98 const struct mtk_afe_irq_data *irqdata;
99}; 99};
100 100
101struct mtk_afe {
102 /* address for ioremap audio hardware register */
103 void __iomem *base_addr;
104 struct device *dev;
105 struct regmap *regmap;
106 struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
107 struct clk *clocks[MTK_CLK_NUM];
108};
109#endif 101#endif
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index cc228db5fb76..d190fe017559 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -45,18 +45,21 @@
45/* Memory interface */ 45/* Memory interface */
46#define AFE_DL1_BASE 0x0040 46#define AFE_DL1_BASE 0x0040
47#define AFE_DL1_CUR 0x0044 47#define AFE_DL1_CUR 0x0044
48#define AFE_DL1_END 0x0048
48#define AFE_DL2_BASE 0x0050 49#define AFE_DL2_BASE 0x0050
49#define AFE_DL2_CUR 0x0054 50#define AFE_DL2_CUR 0x0054
50#define AFE_AWB_BASE 0x0070 51#define AFE_AWB_BASE 0x0070
51#define AFE_AWB_CUR 0x007c 52#define AFE_AWB_CUR 0x007c
52#define AFE_VUL_BASE 0x0080 53#define AFE_VUL_BASE 0x0080
53#define AFE_VUL_CUR 0x008c 54#define AFE_VUL_CUR 0x008c
55#define AFE_VUL_END 0x0088
54#define AFE_DAI_BASE 0x0090 56#define AFE_DAI_BASE 0x0090
55#define AFE_DAI_CUR 0x009c 57#define AFE_DAI_CUR 0x009c
56#define AFE_MOD_PCM_BASE 0x0330 58#define AFE_MOD_PCM_BASE 0x0330
57#define AFE_MOD_PCM_CUR 0x033c 59#define AFE_MOD_PCM_CUR 0x033c
58#define AFE_HDMI_OUT_BASE 0x0374 60#define AFE_HDMI_OUT_BASE 0x0374
59#define AFE_HDMI_OUT_CUR 0x0378 61#define AFE_HDMI_OUT_CUR 0x0378
62#define AFE_HDMI_OUT_END 0x037c
60 63
61#define AFE_ADDA2_TOP_CON0 0x0600 64#define AFE_ADDA2_TOP_CON0 0x0600
62 65
@@ -127,6 +130,34 @@ enum afe_tdm_ch_start {
127 AFE_TDM_CH_ZERO, 130 AFE_TDM_CH_ZERO,
128}; 131};
129 132
133static const unsigned int mtk_afe_backup_list[] = {
134 AUDIO_TOP_CON0,
135 AFE_CONN1,
136 AFE_CONN2,
137 AFE_CONN7,
138 AFE_CONN8,
139 AFE_DAC_CON1,
140 AFE_DL1_BASE,
141 AFE_DL1_END,
142 AFE_VUL_BASE,
143 AFE_VUL_END,
144 AFE_HDMI_OUT_BASE,
145 AFE_HDMI_OUT_END,
146 AFE_HDMI_CONN0,
147 AFE_DAC_CON0,
148};
149
150struct mtk_afe {
151 /* address for ioremap audio hardware register */
152 void __iomem *base_addr;
153 struct device *dev;
154 struct regmap *regmap;
155 struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
156 struct clk *clocks[MTK_CLK_NUM];
157 unsigned int backup_regs[ARRAY_SIZE(mtk_afe_backup_list)];
158 bool suspended;
159};
160
130static const struct snd_pcm_hardware mtk_afe_hardware = { 161static const struct snd_pcm_hardware mtk_afe_hardware = {
131 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 162 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
132 SNDRV_PCM_INFO_MMAP_VALID), 163 SNDRV_PCM_INFO_MMAP_VALID),
@@ -722,11 +753,53 @@ static const struct snd_soc_dai_ops mtk_afe_hdmi_ops = {
722 753
723}; 754};
724 755
756static int mtk_afe_runtime_suspend(struct device *dev);
757static int mtk_afe_runtime_resume(struct device *dev);
758
759static int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
760{
761 struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
762 int i;
763
764 dev_dbg(afe->dev, "%s\n", __func__);
765 if (pm_runtime_status_suspended(afe->dev) || afe->suspended)
766 return 0;
767
768 for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
769 regmap_read(afe->regmap, mtk_afe_backup_list[i],
770 &afe->backup_regs[i]);
771
772 afe->suspended = true;
773 mtk_afe_runtime_suspend(afe->dev);
774 return 0;
775}
776
777static int mtk_afe_dai_resume(struct snd_soc_dai *dai)
778{
779 struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
780 int i = 0;
781
782 dev_dbg(afe->dev, "%s\n", __func__);
783 if (pm_runtime_status_suspended(afe->dev) || !afe->suspended)
784 return 0;
785
786 mtk_afe_runtime_resume(afe->dev);
787
788 for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
789 regmap_write(afe->regmap, mtk_afe_backup_list[i],
790 afe->backup_regs[i]);
791
792 afe->suspended = false;
793 return 0;
794}
795
725static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = { 796static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
726 /* FE DAIs: memory intefaces to CPU */ 797 /* FE DAIs: memory intefaces to CPU */
727 { 798 {
728 .name = "DL1", /* downlink 1 */ 799 .name = "DL1", /* downlink 1 */
729 .id = MTK_AFE_MEMIF_DL1, 800 .id = MTK_AFE_MEMIF_DL1,
801 .suspend = mtk_afe_dai_suspend,
802 .resume = mtk_afe_dai_resume,
730 .playback = { 803 .playback = {
731 .stream_name = "DL1", 804 .stream_name = "DL1",
732 .channels_min = 1, 805 .channels_min = 1,
@@ -738,6 +811,8 @@ static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
738 }, { 811 }, {
739 .name = "VUL", /* voice uplink */ 812 .name = "VUL", /* voice uplink */
740 .id = MTK_AFE_MEMIF_VUL, 813 .id = MTK_AFE_MEMIF_VUL,
814 .suspend = mtk_afe_dai_suspend,
815 .resume = mtk_afe_dai_resume,
741 .capture = { 816 .capture = {
742 .stream_name = "VUL", 817 .stream_name = "VUL",
743 .channels_min = 1, 818 .channels_min = 1,
@@ -774,6 +849,8 @@ static struct snd_soc_dai_driver mtk_afe_hdmi_dais[] = {
774 { 849 {
775 .name = "HDMI", 850 .name = "HDMI",
776 .id = MTK_AFE_MEMIF_HDMI, 851 .id = MTK_AFE_MEMIF_HDMI,
852 .suspend = mtk_afe_dai_suspend,
853 .resume = mtk_afe_dai_resume,
777 .playback = { 854 .playback = {
778 .stream_name = "HDMI", 855 .stream_name = "HDMI",
779 .channels_min = 2, 856 .channels_min = 2,
@@ -820,10 +897,6 @@ static const struct snd_kcontrol_new mtk_afe_o10_mix[] = {
820}; 897};
821 898
822static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = { 899static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
823 /* Backend DAIs */
824 SND_SOC_DAPM_AIF_IN("I2S Capture", NULL, 0, SND_SOC_NOPM, 0, 0),
825 SND_SOC_DAPM_AIF_OUT("I2S Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
826
827 /* inter-connections */ 900 /* inter-connections */
828 SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0), 901 SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
829 SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0), 902 SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -855,11 +928,6 @@ static const struct snd_soc_dapm_route mtk_afe_pcm_routes[] = {
855 { "O10", "I18 Switch", "I18" }, 928 { "O10", "I18 Switch", "I18" },
856}; 929};
857 930
858static const struct snd_soc_dapm_widget mtk_afe_hdmi_widgets[] = {
859 /* Backend DAIs */
860 SND_SOC_DAPM_AIF_OUT("HDMIO Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
861};
862
863static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = { 931static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = {
864 {"HDMIO Playback", NULL, "HDMI"}, 932 {"HDMIO Playback", NULL, "HDMI"},
865}; 933};
@@ -874,8 +942,6 @@ static const struct snd_soc_component_driver mtk_afe_pcm_dai_component = {
874 942
875static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = { 943static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = {
876 .name = "mtk-afe-hdmi-dai", 944 .name = "mtk-afe-hdmi-dai",
877 .dapm_widgets = mtk_afe_hdmi_widgets,
878 .num_dapm_widgets = ARRAY_SIZE(mtk_afe_hdmi_widgets),
879 .dapm_routes = mtk_afe_hdmi_routes, 945 .dapm_routes = mtk_afe_hdmi_routes,
880 .num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes), 946 .num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes),
881}; 947};
@@ -1199,6 +1265,8 @@ err_pm_disable:
1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev) 1265static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
1200{ 1266{
1201 pm_runtime_disable(&pdev->dev); 1267 pm_runtime_disable(&pdev->dev);
1268 if (!pm_runtime_status_suspended(&pdev->dev))
1269 mtk_afe_runtime_suspend(&pdev->dev);
1202 snd_soc_unregister_component(&pdev->dev); 1270 snd_soc_unregister_component(&pdev->dev);
1203 snd_soc_unregister_platform(&pdev->dev); 1271 snd_soc_unregister_platform(&pdev->dev);
1204 return 0; 1272 return 0;
@@ -1218,7 +1286,6 @@ static const struct dev_pm_ops mtk_afe_pm_ops = {
1218static struct platform_driver mtk_afe_pcm_driver = { 1286static struct platform_driver mtk_afe_pcm_driver = {
1219 .driver = { 1287 .driver = {
1220 .name = "mtk-afe-pcm", 1288 .name = "mtk-afe-pcm",
1221 .owner = THIS_MODULE,
1222 .of_match_table = mtk_afe_pcm_dt_match, 1289 .of_match_table = mtk_afe_pcm_dt_match,
1223 .pm = &mtk_afe_pm_ops, 1290 .pm = &mtk_afe_pm_ops,
1224 }, 1291 },
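A note on the remove-path hunk above: pm_runtime_disable() alone would leave the AFE clocks running if the device happened to be runtime-active at unbind time, so the driver now suspends it by hand when runtime PM has not already done so. A minimal sketch of the pattern, with everything except the pm_runtime helpers treated as a placeholder:

	static int example_pcm_dev_remove(struct platform_device *pdev)
	{
		pm_runtime_disable(&pdev->dev);
		/* if runtime PM never suspended the device, do it manually */
		if (!pm_runtime_status_suspended(&pdev->dev))
			example_runtime_suspend(&pdev->dev);
		return 0;
	}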
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 5ae5ca15b6d6..e09326158bc2 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -308,13 +308,7 @@ static struct snd_soc_platform_driver nuc900_soc_platform = {
308 308
309static int nuc900_soc_platform_probe(struct platform_device *pdev) 309static int nuc900_soc_platform_probe(struct platform_device *pdev)
310{ 310{
311 return snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform); 311 return devm_snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
312}
313
314static int nuc900_soc_platform_remove(struct platform_device *pdev)
315{
316 snd_soc_unregister_platform(&pdev->dev);
317 return 0;
318} 312}
319 313
320static struct platform_driver nuc900_pcm_driver = { 314static struct platform_driver nuc900_pcm_driver = {
@@ -323,7 +317,6 @@ static struct platform_driver nuc900_pcm_driver = {
323 }, 317 },
324 318
325 .probe = nuc900_soc_platform_probe, 319 .probe = nuc900_soc_platform_probe,
326 .remove = nuc900_soc_platform_remove,
327}; 320};
328 321
329module_platform_driver(nuc900_pcm_driver); 322module_platform_driver(nuc900_pcm_driver);
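This is the first of several conversions in this series to the managed registration helpers; with devm_snd_soc_register_platform() (or devm_snd_soc_register_component() further down) the unregister happens automatically on unbind, so a .remove callback whose only job was to call the unregister function can simply be deleted. A hedged sketch of the resulting driver shape, using placeholder foo_* names:

	static int foo_platform_probe(struct platform_device *pdev)
	{
		return devm_snd_soc_register_platform(&pdev->dev, &foo_soc_platform);
	}

	static struct platform_driver foo_pcm_driver = {
		.driver = {
			.name = "foo-pcm",
		},
		.probe = foo_platform_probe,
		/* no .remove: the devm core unregisters the platform on unbind */
	};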
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 68a125205375..c7563e230c7d 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -965,25 +965,15 @@ int omap_mcbsp_init(struct platform_device *pdev)
965 mcbsp->free = true; 965 mcbsp->free = true;
966 966
967 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu"); 967 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
968 if (!res) { 968 if (!res)
969 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 969 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
970 if (!res) { 970
971 dev_err(mcbsp->dev, "invalid memory resource\n"); 971 mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
972 return -ENOMEM; 972 if (IS_ERR(mcbsp->io_base))
973 } 973 return PTR_ERR(mcbsp->io_base);
974 }
975 if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
976 dev_name(&pdev->dev))) {
977 dev_err(mcbsp->dev, "memory region already claimed\n");
978 return -ENODEV;
979 }
980 974
981 mcbsp->phys_base = res->start; 975 mcbsp->phys_base = res->start;
982 mcbsp->reg_cache_size = resource_size(res); 976 mcbsp->reg_cache_size = resource_size(res);
983 mcbsp->io_base = devm_ioremap(&pdev->dev, res->start,
984 resource_size(res));
985 if (!mcbsp->io_base)
986 return -ENOMEM;
987 977
988 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma"); 978 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
989 if (!res) 979 if (!res)
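The McBSP change works because devm_ioremap_resource() folds the resource NULL check, devm_request_mem_region() and devm_ioremap() into one call and reports failures as ERR_PTR values, which is why the hand-rolled error handling above could go. A rough sketch of the idiom (resource index and variable names are illustrative):

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* also copes with res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);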
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index aeef25c0cb3d..584b2372339e 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -81,7 +81,15 @@ static int hdmi_dai_startup(struct snd_pcm_substream *substream,
81 ret = snd_pcm_hw_constraint_step(substream->runtime, 0, 81 ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
82 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128); 82 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
83 if (ret < 0) { 83 if (ret < 0) {
84 dev_err(dai->dev, "could not apply constraint\n"); 84 dev_err(dai->dev, "Could not apply period constraint: %d\n",
85 ret);
86 return ret;
87 }
88 ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
89 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);
90 if (ret < 0) {
91 dev_err(dai->dev, "Could not apply buffer constraint: %d\n",
92 ret);
85 return ret; 93 return ret;
86 } 94 }
87 95
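The added hunk applies to the buffer size the same 128-byte step constraint that was already applied to the period size, presumably so both stay aligned to the same granularity, and it now reports the error code in both messages. Roughly, the startup callback follows this shape (error messages trimmed, only the ALSA constraint API is assumed):

	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
	if (ret < 0)
		return ret;
	return snd_pcm_hw_constraint_step(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);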
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 076bec606d78..732e749a1f8e 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -154,8 +154,7 @@ static const struct snd_soc_dapm_route omap3pandora_map[] = {
154 154
155static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd) 155static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
156{ 156{
157 struct snd_soc_codec *codec = rtd->codec; 157 struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
158 struct snd_soc_dapm_context *dapm = &codec->dapm;
159 158
160 /* All TWL4030 output pins are floating */ 159 /* All TWL4030 output pins are floating */
161 snd_soc_dapm_nc_pin(dapm, "EARPIECE"); 160 snd_soc_dapm_nc_pin(dapm, "EARPIECE");
@@ -174,8 +173,7 @@ static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
174 173
175static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd) 174static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
176{ 175{
177 struct snd_soc_codec *codec = rtd->codec; 176 struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
178 struct snd_soc_dapm_context *dapm = &codec->dapm;
179 177
180 /* Not connected */ 178 /* Not connected */
181 snd_soc_dapm_nc_pin(dapm, "HSMIC"); 179 snd_soc_dapm_nc_pin(dapm, "HSMIC");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 1eb45dcfb8e8..51e790d006f5 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -232,13 +232,7 @@ static int mmp_pcm_probe(struct platform_device *pdev)
232 mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max = 232 mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max =
233 pdata->period_max_capture; 233 pdata->period_max_capture;
234 } 234 }
235 return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform); 235 return devm_snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
236}
237
238static int mmp_pcm_remove(struct platform_device *pdev)
239{
240 snd_soc_unregister_platform(&pdev->dev);
241 return 0;
242} 236}
243 237
244static struct platform_driver mmp_pcm_driver = { 238static struct platform_driver mmp_pcm_driver = {
@@ -247,7 +241,6 @@ static struct platform_driver mmp_pcm_driver = {
247 }, 241 },
248 242
249 .probe = mmp_pcm_probe, 243 .probe = mmp_pcm_probe,
250 .remove = mmp_pcm_remove,
251}; 244};
252 245
253module_platform_driver(mmp_pcm_driver); 246module_platform_driver(mmp_pcm_driver);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index fbe2e93d6edc..3da485ec1de7 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -813,14 +813,8 @@ static const struct of_device_id pxa_ssp_of_ids[] = {
813 813
814static int asoc_ssp_probe(struct platform_device *pdev) 814static int asoc_ssp_probe(struct platform_device *pdev)
815{ 815{
816 return snd_soc_register_component(&pdev->dev, &pxa_ssp_component, 816 return devm_snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
817 &pxa_ssp_dai, 1); 817 &pxa_ssp_dai, 1);
818}
819
820static int asoc_ssp_remove(struct platform_device *pdev)
821{
822 snd_soc_unregister_component(&pdev->dev);
823 return 0;
824} 818}
825 819
826static struct platform_driver asoc_ssp_driver = { 820static struct platform_driver asoc_ssp_driver = {
@@ -830,7 +824,6 @@ static struct platform_driver asoc_ssp_driver = {
830 }, 824 },
831 825
832 .probe = asoc_ssp_probe, 826 .probe = asoc_ssp_probe,
833 .remove = asoc_ssp_remove,
834}; 827};
835 828
836module_platform_driver(asoc_ssp_driver); 829module_platform_driver(asoc_ssp_driver);
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index e68290c15328..6b4e40036910 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -367,19 +367,12 @@ static const struct snd_soc_component_driver pxa_i2s_component = {
367 367
368static int pxa2xx_i2s_drv_probe(struct platform_device *pdev) 368static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
369{ 369{
370 return snd_soc_register_component(&pdev->dev, &pxa_i2s_component, 370 return devm_snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
371 &pxa_i2s_dai, 1); 371 &pxa_i2s_dai, 1);
372}
373
374static int pxa2xx_i2s_drv_remove(struct platform_device *pdev)
375{
376 snd_soc_unregister_component(&pdev->dev);
377 return 0;
378} 372}
379 373
380static struct platform_driver pxa2xx_i2s_driver = { 374static struct platform_driver pxa2xx_i2s_driver = {
381 .probe = pxa2xx_i2s_drv_probe, 375 .probe = pxa2xx_i2s_drv_probe,
382 .remove = pxa2xx_i2s_drv_remove,
383 376
384 .driver = { 377 .driver = {
385 .name = "pxa2xx-i2s", 378 .name = "pxa2xx-i2s",
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index a51c9da66614..831ee37d2e3e 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -124,13 +124,7 @@ static struct snd_soc_platform_driver pxa2xx_soc_platform = {
124 124
125static int pxa2xx_soc_platform_probe(struct platform_device *pdev) 125static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
126{ 126{
127 return snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform); 127 return devm_snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
128}
129
130static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
131{
132 snd_soc_unregister_platform(&pdev->dev);
133 return 0;
134} 128}
135 129
136#ifdef CONFIG_OF 130#ifdef CONFIG_OF
@@ -147,7 +141,6 @@ static struct platform_driver pxa_pcm_driver = {
147 }, 141 },
148 142
149 .probe = pxa2xx_soc_platform_probe, 143 .probe = pxa2xx_soc_platform_probe,
150 .remove = pxa2xx_soc_platform_remove,
151}; 144};
152 145
153module_platform_driver(pxa_pcm_driver); 146module_platform_driver(pxa_pcm_driver);
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 807fedfa1c76..3cc252e55468 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -1,5 +1,6 @@
1config SND_SOC_QCOM 1config SND_SOC_QCOM
2 tristate "ASoC support for QCOM platforms" 2 tristate "ASoC support for QCOM platforms"
3 depends on ARCH_QCOM || COMPILE_TEST
3 help 4 help
4 Say Y or M if you want to add support to use audio devices 5 Say Y or M if you want to add support to use audio devices
5 in Qualcomm Technologies SOC-based platforms. 6 in Qualcomm Technologies SOC-based platforms.
@@ -14,19 +15,17 @@ config SND_SOC_LPASS_PLATFORM
14 15
15config SND_SOC_LPASS_IPQ806X 16config SND_SOC_LPASS_IPQ806X
16 tristate 17 tristate
17 depends on SND_SOC_QCOM
18 select SND_SOC_LPASS_CPU 18 select SND_SOC_LPASS_CPU
19 select SND_SOC_LPASS_PLATFORM 19 select SND_SOC_LPASS_PLATFORM
20 20
21config SND_SOC_LPASS_APQ8016 21config SND_SOC_LPASS_APQ8016
22 tristate 22 tristate
23 depends on SND_SOC_QCOM
24 select SND_SOC_LPASS_CPU 23 select SND_SOC_LPASS_CPU
25 select SND_SOC_LPASS_PLATFORM 24 select SND_SOC_LPASS_PLATFORM
26 25
27config SND_SOC_STORM 26config SND_SOC_STORM
28 tristate "ASoC I2S support for Storm boards" 27 tristate "ASoC I2S support for Storm boards"
29 depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST) 28 depends on SND_SOC_QCOM
30 select SND_SOC_LPASS_IPQ806X 29 select SND_SOC_LPASS_IPQ806X
31 select SND_SOC_MAX98357A 30 select SND_SOC_MAX98357A
32 help 31 help
@@ -35,7 +34,7 @@ config SND_SOC_STORM
35 34
36config SND_SOC_APQ8016_SBC 35config SND_SOC_APQ8016_SBC
37 tristate "SoC Audio support for APQ8016 SBC platforms" 36 tristate "SoC Audio support for APQ8016 SBC platforms"
38 depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST) 37 depends on SND_SOC_QCOM
39 select SND_SOC_LPASS_APQ8016 38 select SND_SOC_LPASS_APQ8016
40 help 39 help
41 Support for Qualcomm Technologies LPASS audio block in 40 Support for Qualcomm Technologies LPASS audio block in
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 23f3d59e6d09..97bc2023f08a 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -235,7 +235,7 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
235 return ret; 235 return ret;
236} 236}
237 237
238struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = { 238const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
239 .set_sysclk = lpass_cpu_daiops_set_sysclk, 239 .set_sysclk = lpass_cpu_daiops_set_sysclk,
240 .startup = lpass_cpu_daiops_startup, 240 .startup = lpass_cpu_daiops_startup,
241 .shutdown = lpass_cpu_daiops_shutdown, 241 .shutdown = lpass_cpu_daiops_shutdown,
diff --git a/sound/soc/qcom/lpass-ipq806x.c b/sound/soc/qcom/lpass-ipq806x.c
index 7356d3a766d6..7a4167952711 100644
--- a/sound/soc/qcom/lpass-ipq806x.c
+++ b/sound/soc/qcom/lpass-ipq806x.c
@@ -73,7 +73,7 @@ static int ipq806x_lpass_free_dma_channel(struct lpass_data *drvdata, int chan)
73 return 0; 73 return 0;
74} 74}
75 75
76struct lpass_variant ipq806x_data = { 76static struct lpass_variant ipq806x_data = {
77 .i2sctrl_reg_base = 0x0010, 77 .i2sctrl_reg_base = 0x0010,
78 .i2sctrl_reg_stride = 0x04, 78 .i2sctrl_reg_stride = 0x04,
79 .i2s_ports = 5, 79 .i2s_ports = 5,
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index d6e86c119e74..0b63e2e5bcc9 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -93,6 +93,6 @@ int asoc_qcom_lpass_platform_register(struct platform_device *);
93int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev); 93int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev);
94int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev); 94int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev);
95int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai); 95int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai);
96extern struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops; 96extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
97 97
98#endif /* __LPASS_H__ */ 98#endif /* __LPASS_H__ */
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index e18182699d83..58bae8e2cf5f 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -14,3 +14,22 @@ config SND_SOC_ROCKCHIP_I2S
14 Say Y or M if you want to add support for I2S driver for 14 Say Y or M if you want to add support for I2S driver for
15 Rockchip I2S device. The device supports up to a maximum of 15 Rockchip I2S device. The device supports up to a maximum of
16 8 channels each for play and record. 16 8 channels each for play and record.
17
18config SND_SOC_ROCKCHIP_MAX98090
19 tristate "ASoC support for Rockchip boards using a MAX98090 codec"
20 depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
21 select SND_SOC_ROCKCHIP_I2S
22 select SND_SOC_MAX98090
23 select SND_SOC_TS3A227E
24 help
25 Say Y or M here if you want to add support for SoC audio on Rockchip
26 boards using the MAX98090 codec, such as Veyron.
27
28config SND_SOC_ROCKCHIP_RT5645
29 tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
30 depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
31 select SND_SOC_ROCKCHIP_I2S
32 select SND_SOC_RT5645
33 help
34 Say Y or M here if you want to add support for SoC audio on Rockchip
35 boards using the RT5645/RT5650 codec, such as Veyron.
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index b9219092b47f..1bc1dc3c729a 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -2,3 +2,9 @@
2snd-soc-i2s-objs := rockchip_i2s.o 2snd-soc-i2s-objs := rockchip_i2s.o
3 3
4obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-i2s.o 4obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-i2s.o
5
6snd-soc-rockchip-max98090-objs := rockchip_max98090.o
7snd-soc-rockchip-rt5645-objs := rockchip_rt5645.o
8
9obj-$(CONFIG_SND_SOC_ROCKCHIP_MAX98090) += snd-soc-rockchip-max98090.o
10obj-$(CONFIG_SND_SOC_ROCKCHIP_RT5645) += snd-soc-rockchip-rt5645.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index acb5be53bfb4..b93610212e3d 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -483,16 +483,14 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
483 goto err_suspend; 483 goto err_suspend;
484 } 484 }
485 485
486 ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); 486 ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
487 if (ret) { 487 if (ret) {
488 dev_err(&pdev->dev, "Could not register PCM\n"); 488 dev_err(&pdev->dev, "Could not register PCM\n");
489 goto err_pcm_register; 489 return ret;
490 } 490 }
491 491
492 return 0; 492 return 0;
493 493
494err_pcm_register:
495 snd_dmaengine_pcm_unregister(&pdev->dev);
496err_suspend: 494err_suspend:
497 if (!pm_runtime_status_suspended(&pdev->dev)) 495 if (!pm_runtime_status_suspended(&pdev->dev))
498 i2s_runtime_suspend(&pdev->dev); 496 i2s_runtime_suspend(&pdev->dev);
@@ -512,8 +510,6 @@ static int rockchip_i2s_remove(struct platform_device *pdev)
512 510
513 clk_disable_unprepare(i2s->mclk); 511 clk_disable_unprepare(i2s->mclk);
514 clk_disable_unprepare(i2s->hclk); 512 clk_disable_unprepare(i2s->hclk);
515 snd_dmaengine_pcm_unregister(&pdev->dev);
516 snd_soc_unregister_component(&pdev->dev);
517 513
518 return 0; 514 return 0;
519} 515}
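rockchip_i2s now registers its PCM through the managed devm_snd_dmaengine_pcm_register(), so the err_pcm_register unwind label in probe and the explicit snd_dmaengine_pcm_unregister()/snd_soc_unregister_component() calls in remove become unnecessary; only the runtime-PM and clock teardown, which devm cannot infer, is kept.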
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
new file mode 100644
index 000000000000..26567b10393a
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -0,0 +1,236 @@
1/*
2 * Rockchip machine ASoC driver for boards using a MAX98090 CODEC.
3 *
4 * Copyright (c) 2014, ROCKCHIP CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/gpio.h>
24#include <linux/of_gpio.h>
25#include <sound/core.h>
26#include <sound/jack.h>
27#include <sound/pcm.h>
28#include <sound/pcm_params.h>
29#include <sound/soc.h>
30
31#include "rockchip_i2s.h"
32#include "../codecs/ts3a227e.h"
33
34#define DRV_NAME "rockchip-snd-max98090"
35
36static struct snd_soc_jack headset_jack;
37static struct snd_soc_jack_pin headset_jack_pins[] = {
38 {
39 .pin = "Headset Jack",
40 .mask = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
41 SND_JACK_BTN_0 | SND_JACK_BTN_1 |
42 SND_JACK_BTN_2 | SND_JACK_BTN_3,
43 },
44};
45
46static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
47 SND_SOC_DAPM_HP("Headphone", NULL),
48 SND_SOC_DAPM_MIC("Headset Mic", NULL),
49 SND_SOC_DAPM_MIC("Int Mic", NULL),
50 SND_SOC_DAPM_SPK("Speaker", NULL),
51};
52
53static const struct snd_soc_dapm_route rk_audio_map[] = {
54 {"IN34", NULL, "Headset Mic"},
55 {"IN34", NULL, "MICBIAS"},
56 {"MICBIAS", NULL, "Headset Mic"},
57 {"DMICL", NULL, "Int Mic"},
58 {"Headphone", NULL, "HPL"},
59 {"Headphone", NULL, "HPR"},
60 {"Speaker", NULL, "SPKL"},
61 {"Speaker", NULL, "SPKR"},
62};
63
64static const struct snd_kcontrol_new rk_mc_controls[] = {
65 SOC_DAPM_PIN_SWITCH("Headphone"),
66 SOC_DAPM_PIN_SWITCH("Headset Mic"),
67 SOC_DAPM_PIN_SWITCH("Int Mic"),
68 SOC_DAPM_PIN_SWITCH("Speaker"),
69};
70
71static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
72 struct snd_pcm_hw_params *params)
73{
74 int ret = 0;
75 struct snd_soc_pcm_runtime *rtd = substream->private_data;
76 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
77 struct snd_soc_dai *codec_dai = rtd->codec_dai;
78 int mclk;
79
80 switch (params_rate(params)) {
81 case 8000:
82 case 16000:
83 case 48000:
84 case 96000:
85 mclk = 12288000;
86 break;
87 case 44100:
88 mclk = 11289600;
89 break;
90 default:
91 return -EINVAL;
92 }
93
94 ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
95 SND_SOC_CLOCK_OUT);
96 if (ret < 0) {
97 dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
98 return ret;
99 }
100
101 ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
102 SND_SOC_CLOCK_IN);
103 if (ret < 0) {
104 dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
105 return ret;
106 }
107
108 return ret;
109}
110
111static int rk_init(struct snd_soc_pcm_runtime *runtime)
112{
113 /* Enable Headset and 4 Buttons Jack detection */
114 return snd_soc_card_jack_new(runtime->card, "Headset Jack",
115 SND_JACK_HEADSET |
116 SND_JACK_BTN_0 | SND_JACK_BTN_1 |
117 SND_JACK_BTN_2 | SND_JACK_BTN_3,
118 &headset_jack,
119 headset_jack_pins,
120 ARRAY_SIZE(headset_jack_pins));
121}
122
123static int rk_98090_headset_init(struct snd_soc_component *component)
124{
125 return ts3a227e_enable_jack_detect(component, &headset_jack);
126}
127
128static struct snd_soc_ops rk_aif1_ops = {
129 .hw_params = rk_aif1_hw_params,
130};
131
132static struct snd_soc_aux_dev rk_98090_headset_dev = {
133 .name = "Headset Chip",
134 .init = rk_98090_headset_init,
135};
136
137static struct snd_soc_dai_link rk_dailink = {
138 .name = "max98090",
139 .stream_name = "Audio",
140 .codec_dai_name = "HiFi",
141 .init = rk_init,
142 .ops = &rk_aif1_ops,
143 /* set max98090 as slave */
144 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
145 SND_SOC_DAIFMT_CBS_CFS,
146};
147
148static struct snd_soc_card snd_soc_card_rk = {
149 .name = "ROCKCHIP-I2S",
150 .owner = THIS_MODULE,
151 .dai_link = &rk_dailink,
152 .num_links = 1,
153 .aux_dev = &rk_98090_headset_dev,
154 .num_aux_devs = 1,
155 .dapm_widgets = rk_dapm_widgets,
156 .num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
157 .dapm_routes = rk_audio_map,
158 .num_dapm_routes = ARRAY_SIZE(rk_audio_map),
159 .controls = rk_mc_controls,
160 .num_controls = ARRAY_SIZE(rk_mc_controls),
161};
162
163static int snd_rk_mc_probe(struct platform_device *pdev)
164{
165 int ret = 0;
166 struct snd_soc_card *card = &snd_soc_card_rk;
167 struct device_node *np = pdev->dev.of_node;
168
169 /* register the soc card */
170 card->dev = &pdev->dev;
171
172 rk_dailink.codec_of_node = of_parse_phandle(np,
173 "rockchip,audio-codec", 0);
174 if (!rk_dailink.codec_of_node) {
175 dev_err(&pdev->dev,
176 "Property 'rockchip,audio-codec' missing or invalid\n");
177 return -EINVAL;
178 }
179
180 rk_dailink.cpu_of_node = of_parse_phandle(np,
181 "rockchip,i2s-controller", 0);
182 if (!rk_dailink.cpu_of_node) {
183 dev_err(&pdev->dev,
184 "Property 'rockchip,i2s-controller' missing or invalid\n");
185 return -EINVAL;
186 }
187
188 rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
189
190 rk_98090_headset_dev.codec_of_node = of_parse_phandle(np,
191 "rockchip,headset-codec", 0);
192 if (!rk_98090_headset_dev.codec_of_node) {
193 dev_err(&pdev->dev,
194 "Property 'rockchip,headset-codec' missing/invalid\n");
195 return -EINVAL;
196 }
197
198 ret = snd_soc_of_parse_card_name(card, "rockchip,model");
199 if (ret) {
200 dev_err(&pdev->dev,
201 "Soc parse card name failed %d\n", ret);
202 return ret;
203 }
204
205 ret = devm_snd_soc_register_card(&pdev->dev, card);
206 if (ret) {
207 dev_err(&pdev->dev,
208 "Soc register card failed %d\n", ret);
209 return ret;
210 }
211
212 return ret;
213}
214
215static const struct of_device_id rockchip_max98090_of_match[] = {
216 { .compatible = "rockchip,rockchip-audio-max98090", },
217 {},
218};
219
220MODULE_DEVICE_TABLE(of, rockchip_max98090_of_match);
221
222static struct platform_driver snd_rk_mc_driver = {
223 .probe = snd_rk_mc_probe,
224 .driver = {
225 .name = DRV_NAME,
226 .pm = &snd_soc_pm_ops,
227 .of_match_table = rockchip_max98090_of_match,
228 },
229};
230
231module_platform_driver(snd_rk_mc_driver);
232
233MODULE_AUTHOR("jianqun <jay.xu@rock-chips.com>");
234MODULE_DESCRIPTION("Rockchip max98090 machine ASoC driver");
235MODULE_LICENSE("GPL v2");
236MODULE_ALIAS("platform:" DRV_NAME);
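On the clock choice in rk_aif1_hw_params() above: the master clock is picked from the two standard audio clock families, 11,289,600 Hz = 256 x 44100 for 44.1 kHz material, and 12,288,000 Hz = 256 x 48000 for the 8/16/48/96 kHz family, where it remains an integer multiple of the sample rate (1536 x 8000, 768 x 16000, 128 x 96000); any other rate is rejected with -EINVAL.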
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
new file mode 100644
index 000000000000..68c62e4c2316
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -0,0 +1,225 @@
1/*
2 * Rockchip machine ASoC driver for boards using a RT5645/RT5650 CODEC.
3 *
4 * Copyright (c) 2015, ROCKCHIP CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/platform_device.h>
22#include <linux/slab.h>
23#include <linux/gpio.h>
24#include <linux/of_gpio.h>
25#include <linux/delay.h>
26#include <sound/core.h>
27#include <sound/jack.h>
28#include <sound/pcm.h>
29#include <sound/pcm_params.h>
30#include <sound/soc.h>
31#include "rockchip_i2s.h"
32
33#define DRV_NAME "rockchip-snd-rt5645"
34
35static struct snd_soc_jack headset_jack;
36
37/* Jack detect via rt5645 driver. */
38extern int rt5645_set_jack_detect(struct snd_soc_codec *codec,
39 struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
40 struct snd_soc_jack *btn_jack);
41
42static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
43 SND_SOC_DAPM_HP("Headphones", NULL),
44 SND_SOC_DAPM_SPK("Speakers", NULL),
45 SND_SOC_DAPM_MIC("Headset Mic", NULL),
46 SND_SOC_DAPM_MIC("Int Mic", NULL),
47};
48
49static const struct snd_soc_dapm_route rk_audio_map[] = {
50 /* Input Lines */
51 {"DMIC L2", NULL, "Int Mic"},
52 {"DMIC R2", NULL, "Int Mic"},
53 {"RECMIXL", NULL, "Headset Mic"},
54 {"RECMIXR", NULL, "Headset Mic"},
55
56 /* Output Lines */
57 {"Headphones", NULL, "HPOR"},
58 {"Headphones", NULL, "HPOL"},
59 {"Speakers", NULL, "SPOL"},
60 {"Speakers", NULL, "SPOR"},
61};
62
63static const struct snd_kcontrol_new rk_mc_controls[] = {
64 SOC_DAPM_PIN_SWITCH("Headphones"),
65 SOC_DAPM_PIN_SWITCH("Speakers"),
66 SOC_DAPM_PIN_SWITCH("Headset Mic"),
67 SOC_DAPM_PIN_SWITCH("Int Mic"),
68};
69
70static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
71 struct snd_pcm_hw_params *params)
72{
73 int ret = 0;
74 struct snd_soc_pcm_runtime *rtd = substream->private_data;
75 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
76 struct snd_soc_dai *codec_dai = rtd->codec_dai;
77 int mclk;
78
79 switch (params_rate(params)) {
80 case 8000:
81 case 16000:
82 case 48000:
83 case 96000:
84 mclk = 12288000;
85 break;
86 case 44100:
87 mclk = 11289600;
88 break;
89 default:
90 return -EINVAL;
91 }
92
93 ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
94 SND_SOC_CLOCK_OUT);
95 if (ret < 0) {
96 dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
97 return ret;
98 }
99
100 ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
101 SND_SOC_CLOCK_IN);
102 if (ret < 0) {
103 dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
104 return ret;
105 }
106
107 return ret;
108}
109
110static int rk_init(struct snd_soc_pcm_runtime *runtime)
111{
112 struct snd_soc_card *card = runtime->card;
113 int ret;
114
115 /* Enable Headset and 4 Buttons Jack detection */
116 ret = snd_soc_card_jack_new(card, "Headset Jack",
117 SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
118 SND_JACK_BTN_0 | SND_JACK_BTN_1 |
119 SND_JACK_BTN_2 | SND_JACK_BTN_3,
120 &headset_jack, NULL, 0);
121 if (ret) {
122 dev_err(card->dev, "New Headset Jack failed! (%d)\n", ret);
123 return ret;
124 }
125
126 return rt5645_set_jack_detect(runtime->codec,
127 &headset_jack,
128 &headset_jack,
129 &headset_jack);
130}
131
132static struct snd_soc_ops rk_aif1_ops = {
133 .hw_params = rk_aif1_hw_params,
134};
135
136static struct snd_soc_dai_link rk_dailink = {
137 .name = "rt5645",
138 .stream_name = "rt5645 PCM",
139 .codec_dai_name = "rt5645-aif1",
140 .init = rk_init,
141 .ops = &rk_aif1_ops,
142 /* set rt5645 as slave */
143 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
144 SND_SOC_DAIFMT_CBS_CFS,
145};
146
147static struct snd_soc_card snd_soc_card_rk = {
148 .name = "I2S-RT5650",
149 .owner = THIS_MODULE,
150 .dai_link = &rk_dailink,
151 .num_links = 1,
152 .dapm_widgets = rk_dapm_widgets,
153 .num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
154 .dapm_routes = rk_audio_map,
155 .num_dapm_routes = ARRAY_SIZE(rk_audio_map),
156 .controls = rk_mc_controls,
157 .num_controls = ARRAY_SIZE(rk_mc_controls),
158};
159
160static int snd_rk_mc_probe(struct platform_device *pdev)
161{
162 int ret = 0;
163 struct snd_soc_card *card = &snd_soc_card_rk;
164 struct device_node *np = pdev->dev.of_node;
165
166 /* register the soc card */
167 card->dev = &pdev->dev;
168
169 rk_dailink.codec_of_node = of_parse_phandle(np,
170 "rockchip,audio-codec", 0);
171 if (!rk_dailink.codec_of_node) {
172 dev_err(&pdev->dev,
173 "Property 'rockchip,audio-codec' missing or invalid\n");
174 return -EINVAL;
175 }
176
177 rk_dailink.cpu_of_node = of_parse_phandle(np,
178 "rockchip,i2s-controller", 0);
179 if (!rk_dailink.cpu_of_node) {
180 dev_err(&pdev->dev,
181 "Property 'rockchip,i2s-controller' missing or invalid\n");
182 return -EINVAL;
183 }
184
185 rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
186
187 ret = snd_soc_of_parse_card_name(card, "rockchip,model");
188 if (ret) {
189 dev_err(&pdev->dev,
190 "Soc parse card name failed %d\n", ret);
191 return ret;
192 }
193
194 ret = devm_snd_soc_register_card(&pdev->dev, card);
195 if (ret) {
196 dev_err(&pdev->dev,
197 "Soc register card failed %d\n", ret);
198 return ret;
199 }
200
201 return ret;
202}
203
204static const struct of_device_id rockchip_rt5645_of_match[] = {
205 { .compatible = "rockchip,rockchip-audio-rt5645", },
206 {},
207};
208
209MODULE_DEVICE_TABLE(of, rockchip_rt5645_of_match);
210
211static struct platform_driver snd_rk_mc_driver = {
212 .probe = snd_rk_mc_probe,
213 .driver = {
214 .name = DRV_NAME,
215 .pm = &snd_soc_pm_ops,
216 .of_match_table = rockchip_rt5645_of_match,
217 },
218};
219
220module_platform_driver(snd_rk_mc_driver);
221
222MODULE_AUTHOR("Xing Zheng <zhengxing@rock-chips.com>");
223MODULE_DESCRIPTION("Rockchip rt5645 machine ASoC driver");
224MODULE_LICENSE("GPL v2");
225MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
index 8bf2e2c4bafb..ee1fda92f2f4 100644
--- a/sound/soc/samsung/arndale_rt5631.c
+++ b/sound/soc/samsung/arndale_rt5631.c
@@ -71,6 +71,7 @@ static struct snd_soc_dai_link arndale_rt5631_dai[] = {
71 71
72static struct snd_soc_card arndale_rt5631 = { 72static struct snd_soc_card arndale_rt5631 = {
73 .name = "Arndale RT5631", 73 .name = "Arndale RT5631",
74 .owner = THIS_MODULE,
74 .dai_link = arndale_rt5631_dai, 75 .dai_link = arndale_rt5631_dai,
75 .num_links = ARRAY_SIZE(arndale_rt5631_dai), 76 .num_links = ARRAY_SIZE(arndale_rt5631_dai),
76}; 77};
@@ -116,15 +117,6 @@ static int arndale_audio_probe(struct platform_device *pdev)
116 return ret; 117 return ret;
117} 118}
118 119
119static int arndale_audio_remove(struct platform_device *pdev)
120{
121 struct snd_soc_card *card = platform_get_drvdata(pdev);
122
123 snd_soc_unregister_card(card);
124
125 return 0;
126}
127
128static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = { 120static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
129 { .compatible = "samsung,arndale-rt5631", }, 121 { .compatible = "samsung,arndale-rt5631", },
130 { .compatible = "samsung,arndale-alc5631", }, 122 { .compatible = "samsung,arndale-alc5631", },
@@ -139,7 +131,6 @@ static struct platform_driver arndale_audio_driver = {
139 .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match), 131 .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
140 }, 132 },
141 .probe = arndale_audio_probe, 133 .probe = arndale_audio_probe,
142 .remove = arndale_audio_remove,
143}; 134};
144 135
145module_platform_driver(arndale_audio_driver); 136module_platform_driver(arndale_audio_driver);
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 7651dc924161..07ce2cfa4845 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -56,6 +56,7 @@ static int snow_late_probe(struct snd_soc_card *card)
56 56
57static struct snd_soc_card snow_snd = { 57static struct snd_soc_card snow_snd = {
58 .name = "Snow-I2S", 58 .name = "Snow-I2S",
59 .owner = THIS_MODULE,
59 .dai_link = snow_dai, 60 .dai_link = snow_dai,
60 .num_links = ARRAY_SIZE(snow_dai), 61 .num_links = ARRAY_SIZE(snow_dai),
61 62
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index fd11404a3bc7..8fad4441c87d 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -327,13 +327,7 @@ static struct snd_soc_platform_driver sh7760_soc_platform = {
327 327
328static int sh7760_soc_platform_probe(struct platform_device *pdev) 328static int sh7760_soc_platform_probe(struct platform_device *pdev)
329{ 329{
330 return snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform); 330 return devm_snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
331}
332
333static int sh7760_soc_platform_remove(struct platform_device *pdev)
334{
335 snd_soc_unregister_platform(&pdev->dev);
336 return 0;
337} 331}
338 332
339static struct platform_driver sh7760_pcm_driver = { 333static struct platform_driver sh7760_pcm_driver = {
@@ -342,7 +336,6 @@ static struct platform_driver sh7760_pcm_driver = {
342 }, 336 },
343 337
344 .probe = sh7760_soc_platform_probe, 338 .probe = sh7760_soc_platform_probe,
345 .remove = sh7760_soc_platform_remove,
346}; 339};
347 340
348module_platform_driver(sh7760_pcm_driver); 341module_platform_driver(sh7760_pcm_driver);
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 142c066eaee2..0215c78cbddf 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1911,7 +1911,6 @@ MODULE_DEVICE_TABLE(of, fsi_of_match);
1911 1911
1912static const struct platform_device_id fsi_id_table[] = { 1912static const struct platform_device_id fsi_id_table[] = {
1913 { "sh_fsi", (kernel_ulong_t)&fsi1_core }, 1913 { "sh_fsi", (kernel_ulong_t)&fsi1_core },
1914 { "sh_fsi2", (kernel_ulong_t)&fsi2_core },
1915 {}, 1914 {},
1916}; 1915};
1917MODULE_DEVICE_TABLE(platform, fsi_id_table); 1916MODULE_DEVICE_TABLE(platform, fsi_id_table);
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index f1b445173fba..8b258501aa35 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,4 +1,4 @@
1snd-soc-rcar-objs := core.o gen.o dma.o src.o adg.o ssi.o dvc.o 1snd-soc-rcar-objs := core.o gen.o dma.o adg.o ssi.o src.o ctu.o mix.o dvc.o
2obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o 2obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o
3 3
4snd-soc-rsrc-card-objs := rsrc-card.o 4snd-soc-rsrc-card-objs := rsrc-card.o
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f1e5920654f6..f3feed5ce9b6 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -203,9 +203,9 @@ int rsnd_io_is_working(struct rsnd_dai_stream *io)
203} 203}
204 204
205/* 205/*
206 * settting function 206 * ADINR function
207 */ 207 */
208u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io) 208u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
209{ 209{
210 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 210 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
211 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 211 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -227,6 +227,64 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
227 return adinr; 227 return adinr;
228} 228}
229 229
230u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
231{
232 struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
233 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
234 struct device *dev = rsnd_priv_to_dev(priv);
235 u32 chan = runtime->channels;
236
237 switch (chan) {
238 case 1:
239 case 2:
240 case 4:
241 case 6:
242 case 8:
243 break;
244 default:
245 dev_warn(dev, "not supported channel\n");
246 chan = 0;
247 break;
248 }
249
250 return chan;
251}
252
253/*
254 * DALIGN function
255 */
256u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
257{
258 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
259 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
260 struct rsnd_mod *target = src ? src : ssi;
261 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
262 u32 val = 0x76543210;
263 u32 mask = ~0;
264
265 mask <<= runtime->channels * 4;
266 val = val & mask;
267
268 switch (runtime->sample_bits) {
269 case 16:
270 val |= 0x67452301 & ~mask;
271 break;
272 case 32:
273 val |= 0x76543210 & ~mask;
274 break;
275 }
276
277 /*
278 * exchange channels on SRC if possible;
279 * otherwise, R/L volume settings on DVC
280 * would change the inverted channels
281 */
282 if (mod == target)
283 return val;
284 else
285 return 0x76543210;
286}
287
230/* 288/*
231 * rsnd_dai functions 289 * rsnd_dai functions
232 */ 290 */
@@ -242,9 +300,9 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
242 if (val == __rsnd_mod_call_##func) { \ 300 if (val == __rsnd_mod_call_##func) { \
243 called = 1; \ 301 called = 1; \
244 ret = (mod)->ops->func(mod, io, param); \ 302 ret = (mod)->ops->func(mod, io, param); \
245 mod->status = (mod->status & ~mask) + \
246 (add << __rsnd_mod_shift_##func); \
247 } \ 303 } \
304 mod->status = (mod->status & ~mask) + \
305 (add << __rsnd_mod_shift_##func); \
248 dev_dbg(dev, "%s[%d] 0x%08x %s\n", \ 306 dev_dbg(dev, "%s[%d] 0x%08x %s\n", \
249 rsnd_mod_name(mod), rsnd_mod_id(mod), mod->status, \ 307 rsnd_mod_name(mod), rsnd_mod_id(mod), mod->status, \
250 called ? #func : ""); \ 308 called ? #func : ""); \
@@ -274,21 +332,21 @@ u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
274static int rsnd_dai_connect(struct rsnd_mod *mod, 332static int rsnd_dai_connect(struct rsnd_mod *mod,
275 struct rsnd_dai_stream *io) 333 struct rsnd_dai_stream *io)
276{ 334{
335 struct rsnd_priv *priv;
336 struct device *dev;
337
277 if (!mod) 338 if (!mod)
278 return -EIO; 339 return -EIO;
279 340
280 if (io->mod[mod->type]) { 341 priv = rsnd_mod_to_priv(mod);
281 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 342 dev = rsnd_priv_to_dev(priv);
282 struct device *dev = rsnd_priv_to_dev(priv);
283
284 dev_err(dev, "%s[%d] is not empty\n",
285 rsnd_mod_name(mod),
286 rsnd_mod_id(mod));
287 return -EIO;
288 }
289 343
290 io->mod[mod->type] = mod; 344 io->mod[mod->type] = mod;
291 345
346 dev_dbg(dev, "%s[%d] is connected to io (%s)\n",
347 rsnd_mod_name(mod), rsnd_mod_id(mod),
348 rsnd_io_is_play(io) ? "Playback" : "Capture");
349
292 return 0; 350 return 0;
293} 351}
294 352
@@ -517,7 +575,7 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
517 .set_fmt = rsnd_soc_dai_set_fmt, 575 .set_fmt = rsnd_soc_dai_set_fmt,
518}; 576};
519 577
520#define rsnd_path_parse(priv, io, type) \ 578#define rsnd_path_add(priv, io, type) \
521({ \ 579({ \
522 struct rsnd_mod *mod; \ 580 struct rsnd_mod *mod; \
523 int ret = 0; \ 581 int ret = 0; \
@@ -533,7 +591,7 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
533 ret; \ 591 ret; \
534}) 592})
535 593
536#define rsnd_path_break(priv, io, type) \ 594#define rsnd_path_remove(priv, io, type) \
537{ \ 595{ \
538 struct rsnd_mod *mod; \ 596 struct rsnd_mod *mod; \
539 int id = -1; \ 597 int id = -1; \
@@ -547,6 +605,79 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
547 } \ 605 } \
548} 606}
549 607
608void rsnd_path_parse(struct rsnd_priv *priv,
609 struct rsnd_dai_stream *io)
610{
611 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
612 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
613 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
614 struct rsnd_mod *cmd;
615 struct device *dev = rsnd_priv_to_dev(priv);
616 u32 data;
617
618 /* Gen1 is not supported */
619 if (rsnd_is_gen1(priv))
620 return;
621
622 if (!mix && !dvc)
623 return;
624
625 if (mix) {
626 struct rsnd_dai *rdai;
627 int i;
628 u32 path[] = {
629 [0] = 0,
630 [1] = 1 << 0,
631 [2] = 0,
632 [3] = 0,
633 [4] = 0,
634 [5] = 1 << 8
635 };
636
637 /*
638 * it is assumed that the integrator understands the data path;
639 * impossible connections, such as src2 + src5,
640 * are not checked here
641 */
642 data = 0;
643 for_each_rsnd_dai(rdai, priv, i) {
644 io = &rdai->playback;
645 if (mix == rsnd_io_to_mod_mix(io))
646 data |= path[rsnd_mod_id(src)];
647
648 io = &rdai->capture;
649 if (mix == rsnd_io_to_mod_mix(io))
650 data |= path[rsnd_mod_id(src)];
651 }
652
653 /*
654 * We can't use ctu = rsnd_io_ctu() here,
655 * since the IDs of dvc/mix are 0 or 1 (= same as the CMD number)
656 * but ctu IDs are 0 - 7 (= CTU00 - CTU13)
657 */
658 cmd = mix;
659 } else {
660 u32 path[] = {
661 [0] = 0x30000,
662 [1] = 0x30001,
663 [2] = 0x40000,
664 [3] = 0x10000,
665 [4] = 0x20000,
666 [5] = 0x40100
667 };
668
669 data = path[rsnd_mod_id(src)];
670
671 cmd = dvc;
672 }
673
674 dev_dbg(dev, "ctu/mix path = 0x%08x", data);
675
676 rsnd_mod_write(cmd, CMD_ROUTE_SLCT, data);
677
678 rsnd_mod_write(cmd, CMD_CTRL, 0x10);
679}
680
550static int rsnd_path_init(struct rsnd_priv *priv, 681static int rsnd_path_init(struct rsnd_priv *priv,
551 struct rsnd_dai *rdai, 682 struct rsnd_dai *rdai,
552 struct rsnd_dai_stream *io) 683 struct rsnd_dai_stream *io)
@@ -564,18 +695,28 @@ static int rsnd_path_init(struct rsnd_priv *priv,
564 * using fixed path. 695 * using fixed path.
565 */ 696 */
566 697
698 /* SSI */
699 ret = rsnd_path_add(priv, io, ssi);
700 if (ret < 0)
701 return ret;
702
567 /* SRC */ 703 /* SRC */
568 ret = rsnd_path_parse(priv, io, src); 704 ret = rsnd_path_add(priv, io, src);
569 if (ret < 0) 705 if (ret < 0)
570 return ret; 706 return ret;
571 707
572 /* SSI */ 708 /* CTU */
573 ret = rsnd_path_parse(priv, io, ssi); 709 ret = rsnd_path_add(priv, io, ctu);
710 if (ret < 0)
711 return ret;
712
713 /* MIX */
714 ret = rsnd_path_add(priv, io, mix);
574 if (ret < 0) 715 if (ret < 0)
575 return ret; 716 return ret;
576 717
577 /* DVC */ 718 /* DVC */
578 ret = rsnd_path_parse(priv, io, dvc); 719 ret = rsnd_path_add(priv, io, dvc);
579 if (ret < 0) 720 if (ret < 0)
580 return ret; 721 return ret;
581 722
@@ -589,13 +730,15 @@ static void rsnd_of_parse_dai(struct platform_device *pdev,
589 struct device_node *dai_node, *dai_np; 730 struct device_node *dai_node, *dai_np;
590 struct device_node *ssi_node, *ssi_np; 731 struct device_node *ssi_node, *ssi_np;
591 struct device_node *src_node, *src_np; 732 struct device_node *src_node, *src_np;
733 struct device_node *ctu_node, *ctu_np;
734 struct device_node *mix_node, *mix_np;
592 struct device_node *dvc_node, *dvc_np; 735 struct device_node *dvc_node, *dvc_np;
593 struct device_node *playback, *capture; 736 struct device_node *playback, *capture;
594 struct rsnd_dai_platform_info *dai_info; 737 struct rsnd_dai_platform_info *dai_info;
595 struct rcar_snd_info *info = rsnd_priv_to_info(priv); 738 struct rcar_snd_info *info = rsnd_priv_to_info(priv);
596 struct device *dev = &pdev->dev; 739 struct device *dev = &pdev->dev;
597 int nr, i; 740 int nr, i;
598 int dai_i, ssi_i, src_i, dvc_i; 741 int dai_i, ssi_i, src_i, ctu_i, mix_i, dvc_i;
599 742
600 if (!of_data) 743 if (!of_data)
601 return; 744 return;
@@ -621,6 +764,8 @@ static void rsnd_of_parse_dai(struct platform_device *pdev,
621 764
622 ssi_node = of_get_child_by_name(dev->of_node, "rcar_sound,ssi"); 765 ssi_node = of_get_child_by_name(dev->of_node, "rcar_sound,ssi");
623 src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src"); 766 src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src");
767 ctu_node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
768 mix_node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
624 dvc_node = of_get_child_by_name(dev->of_node, "rcar_sound,dvc"); 769 dvc_node = of_get_child_by_name(dev->of_node, "rcar_sound,dvc");
625 770
626#define mod_parse(name) \ 771#define mod_parse(name) \
@@ -657,6 +802,8 @@ if (name##_node) { \
657 802
658 mod_parse(ssi); 803 mod_parse(ssi);
659 mod_parse(src); 804 mod_parse(src);
805 mod_parse(ctu);
806 mod_parse(mix);
660 mod_parse(dvc); 807 mod_parse(dvc);
661 808
662 of_node_put(playback); 809 of_node_put(playback);
@@ -1033,8 +1180,8 @@ static int rsnd_rdai_continuance_probe(struct rsnd_priv *priv,
1033 /* 1180 /*
1034 * remove SRC/DVC from DAI, 1181 * remove SRC/DVC from DAI,
1035 */ 1182 */
1036 rsnd_path_break(priv, io, src); 1183 rsnd_path_remove(priv, io, src);
1037 rsnd_path_break(priv, io, dvc); 1184 rsnd_path_remove(priv, io, dvc);
1038 1185
1039 /* 1186 /*
1040 * fallback 1187 * fallback
@@ -1069,6 +1216,8 @@ static int rsnd_probe(struct platform_device *pdev)
1069 rsnd_dma_probe, 1216 rsnd_dma_probe,
1070 rsnd_ssi_probe, 1217 rsnd_ssi_probe,
1071 rsnd_src_probe, 1218 rsnd_src_probe,
1219 rsnd_ctu_probe,
1220 rsnd_mix_probe,
1072 rsnd_dvc_probe, 1221 rsnd_dvc_probe,
1073 rsnd_adg_probe, 1222 rsnd_adg_probe,
1074 rsnd_dai_probe, 1223 rsnd_dai_probe,
@@ -1164,6 +1313,8 @@ static int rsnd_remove(struct platform_device *pdev)
1164 struct rsnd_priv *priv) = { 1313 struct rsnd_priv *priv) = {
1165 rsnd_ssi_remove, 1314 rsnd_ssi_remove,
1166 rsnd_src_remove, 1315 rsnd_src_remove,
1316 rsnd_ctu_remove,
1317 rsnd_mix_remove,
1167 rsnd_dvc_remove, 1318 rsnd_dvc_remove,
1168 }; 1319 };
1169 int ret = 0, i; 1320 int ret = 0, i;
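A worked example for the new rsnd_get_dalign() above, taking the common 2-channel case: the mask is ~0 << (2 * 4) = 0xffffff00, so 16-bit samples yield (0x76543210 & 0xffffff00) | (0x67452301 & 0xff) = 0x76543201, i.e. only the byte lanes of the two active channels are exchanged, while 32-bit samples keep the identity layout 0x76543210. As the in-code comment says, the exchange is applied on the SRC when one is present (otherwise on the SSI), which is what the mod == target check selects; every other module gets the plain 0x76543210.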
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
new file mode 100644
index 000000000000..05498bba5874
--- /dev/null
+++ b/sound/soc/sh/rcar/ctu.c
@@ -0,0 +1,171 @@
1/*
2 * ctu.c
3 *
4 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include "rsnd.h"
11
12#define CTU_NAME_SIZE 16
13#define CTU_NAME "ctu"
14
15struct rsnd_ctu {
16 struct rsnd_ctu_platform_info *info; /* rcar_snd.h */
17 struct rsnd_mod mod;
18};
19
20#define rsnd_ctu_nr(priv) ((priv)->ctu_nr)
21#define for_each_rsnd_ctu(pos, priv, i) \
22 for ((i) = 0; \
23 ((i) < rsnd_ctu_nr(priv)) && \
24 ((pos) = (struct rsnd_ctu *)(priv)->ctu + i); \
25 i++)
26
27#define rsnd_ctu_initialize_lock(mod) __rsnd_ctu_initialize_lock(mod, 1)
28#define rsnd_ctu_initialize_unlock(mod) __rsnd_ctu_initialize_lock(mod, 0)
29static void __rsnd_ctu_initialize_lock(struct rsnd_mod *mod, u32 enable)
30{
31 rsnd_mod_write(mod, CTU_CTUIR, enable);
32}
33
34static int rsnd_ctu_init(struct rsnd_mod *mod,
35 struct rsnd_dai_stream *io,
36 struct rsnd_priv *priv)
37{
38 rsnd_mod_hw_start(mod);
39
40 rsnd_ctu_initialize_lock(mod);
41
42 rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
43
44 rsnd_ctu_initialize_unlock(mod);
45
46 return 0;
47}
48
49static int rsnd_ctu_quit(struct rsnd_mod *mod,
50 struct rsnd_dai_stream *io,
51 struct rsnd_priv *priv)
52{
53 rsnd_mod_hw_stop(mod);
54
55 return 0;
56}
57
58static struct rsnd_mod_ops rsnd_ctu_ops = {
59 .name = CTU_NAME,
60 .init = rsnd_ctu_init,
61 .quit = rsnd_ctu_quit,
62};
63
64struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id)
65{
66 if (WARN_ON(id < 0 || id >= rsnd_ctu_nr(priv)))
67 id = 0;
68
69 return &((struct rsnd_ctu *)(priv->ctu) + id)->mod;
70}
71
72static void rsnd_of_parse_ctu(struct platform_device *pdev,
73 const struct rsnd_of_data *of_data,
74 struct rsnd_priv *priv)
75{
76 struct device_node *node;
77 struct rsnd_ctu_platform_info *ctu_info;
78 struct rcar_snd_info *info = rsnd_priv_to_info(priv);
79 struct device *dev = &pdev->dev;
80 int nr;
81
82 if (!of_data)
83 return;
84
85 node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
86 if (!node)
87 return;
88
89 nr = of_get_child_count(node);
90 if (!nr)
91 goto rsnd_of_parse_ctu_end;
92
93 ctu_info = devm_kzalloc(dev,
94 sizeof(struct rsnd_ctu_platform_info) * nr,
95 GFP_KERNEL);
96 if (!ctu_info) {
97 dev_err(dev, "ctu info allocation error\n");
98 goto rsnd_of_parse_ctu_end;
99 }
100
101 info->ctu_info = ctu_info;
102 info->ctu_info_nr = nr;
103
104rsnd_of_parse_ctu_end:
105 of_node_put(node);
106
107}
108
109int rsnd_ctu_probe(struct platform_device *pdev,
110 const struct rsnd_of_data *of_data,
111 struct rsnd_priv *priv)
112{
113 struct rcar_snd_info *info = rsnd_priv_to_info(priv);
114 struct device *dev = rsnd_priv_to_dev(priv);
115 struct rsnd_ctu *ctu;
116 struct clk *clk;
117 char name[CTU_NAME_SIZE];
118 int i, nr, ret;
119
120 /* This driver doesn't support Gen1 at this point */
121 if (rsnd_is_gen1(priv)) {
122 dev_warn(dev, "CTU is not supported on Gen1\n");
123 return -EINVAL;
124 }
125
126 rsnd_of_parse_ctu(pdev, of_data, priv);
127
128 nr = info->ctu_info_nr;
129 if (!nr)
130 return 0;
131
132 ctu = devm_kzalloc(dev, sizeof(*ctu) * nr, GFP_KERNEL);
133 if (!ctu)
134 return -ENOMEM;
135
136 priv->ctu_nr = nr;
137 priv->ctu = ctu;
138
139 for_each_rsnd_ctu(ctu, priv, i) {
140 /*
141 * CTU00, CTU01, CTU02, CTU03 => CTU0
142 * CTU10, CTU11, CTU12, CTU13 => CTU1
143 */
144 snprintf(name, CTU_NAME_SIZE, "%s.%d",
145 CTU_NAME, i / 4);
146
147 clk = devm_clk_get(dev, name);
148 if (IS_ERR(clk))
149 return PTR_ERR(clk);
150
151 ctu->info = &info->ctu_info[i];
152
153 ret = rsnd_mod_init(priv, &ctu->mod, &rsnd_ctu_ops,
154 clk, RSND_MOD_CTU, i);
155 if (ret)
156 return ret;
157 }
158
159 return 0;
160}
161
162void rsnd_ctu_remove(struct platform_device *pdev,
163 struct rsnd_priv *priv)
164{
165 struct rsnd_ctu *ctu;
166 int i;
167
168 for_each_rsnd_ctu(ctu, priv, i) {
169 rsnd_mod_quit(&ctu->mod);
170 }
171}
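On the clock lookup in rsnd_ctu_probe(): the i / 4 in the snprintf() maps the eight CTU channels onto two clocks, so CTU00..CTU03 (i = 0..3) all request "ctu.0" and CTU10..CTU13 (i = 4..7) request "ctu.1", which is what the CTU00..CTU13 comment above the loop describes.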
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index d306e298c63d..bfbb8a5e93bd 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -27,6 +27,15 @@ struct rsnd_dma_ctrl {
27 int dmapp_num; 27 int dmapp_num;
28}; 28};
29 29
30struct rsnd_dma_ops {
31 char *name;
32 void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
33 void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
34 int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
35 struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
36 void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
37};
38
30#define rsnd_priv_to_dmac(p) ((struct rsnd_dma_ctrl *)(p)->dma) 39#define rsnd_priv_to_dmac(p) ((struct rsnd_dma_ctrl *)(p)->dma)
31 40
32/* 41/*
@@ -168,7 +177,7 @@ static int rsnd_dmaen_init(struct rsnd_dai_stream *io,
168 dma_cap_set(DMA_SLAVE, mask); 177 dma_cap_set(DMA_SLAVE, mask);
169 178
170 dmaen->chan = dma_request_channel(mask, shdma_chan_filter, 179 dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
171 (void *)id); 180 (void *)(uintptr_t)id);
172 } 181 }
173 if (IS_ERR_OR_NULL(dmaen->chan)) { 182 if (IS_ERR_OR_NULL(dmaen->chan)) {
174 dmaen->chan = NULL; 183 dmaen->chan = NULL;
@@ -182,7 +191,8 @@ static int rsnd_dmaen_init(struct rsnd_dai_stream *io,
182 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 191 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
183 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 192 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
184 193
185 dev_dbg(dev, "dma : %pad -> %pad\n", 194 dev_dbg(dev, "%s %pad -> %pad\n",
195 dma->ops->name,
186 &cfg.src_addr, &cfg.dst_addr); 196 &cfg.src_addr, &cfg.dst_addr);
187 197
188 ret = dmaengine_slave_config(dmaen->chan, &cfg); 198 ret = dmaengine_slave_config(dmaen->chan, &cfg);
@@ -215,6 +225,7 @@ static void rsnd_dmaen_quit(struct rsnd_dai_stream *io, struct rsnd_dma *dma)
215} 225}
216 226
217static struct rsnd_dma_ops rsnd_dmaen_ops = { 227static struct rsnd_dma_ops rsnd_dmaen_ops = {
228 .name = "audmac",
218 .start = rsnd_dmaen_start, 229 .start = rsnd_dmaen_start,
219 .stop = rsnd_dmaen_stop, 230 .stop = rsnd_dmaen_stop,
220 .init = rsnd_dmaen_init, 231 .init = rsnd_dmaen_init,
@@ -360,6 +371,7 @@ static int rsnd_dmapp_init(struct rsnd_dai_stream *io,
360} 371}
361 372
362static struct rsnd_dma_ops rsnd_dmapp_ops = { 373static struct rsnd_dma_ops rsnd_dmapp_ops = {
374 .name = "audmac-pp",
363 .start = rsnd_dmapp_start, 375 .start = rsnd_dmapp_start,
364 .stop = rsnd_dmapp_stop, 376 .stop = rsnd_dmapp_stop,
365 .init = rsnd_dmapp_init, 377 .init = rsnd_dmapp_init,
@@ -414,7 +426,9 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
414 phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU); 426 phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
415 int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod); 427 int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
416 int use_src = !!rsnd_io_to_mod_src(io); 428 int use_src = !!rsnd_io_to_mod_src(io);
417 int use_dvc = !!rsnd_io_to_mod_dvc(io); 429 int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
430 !!rsnd_io_to_mod_mix(io) ||
431 !!rsnd_io_to_mod_ctu(io);
418 int id = rsnd_mod_id(mod); 432 int id = rsnd_mod_id(mod);
419 struct dma_addr { 433 struct dma_addr {
420 dma_addr_t out_addr; 434 dma_addr_t out_addr;
@@ -452,7 +466,7 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
452 }; 466 };
453 467
454 /* it shouldn't happen */ 468 /* it shouldn't happen */
455 if (use_dvc && !use_src) 469 if (use_cmd && !use_src)
456 dev_err(dev, "DVC is selected without SRC\n"); 470 dev_err(dev, "DVC is selected without SRC\n");
457 471
458 /* use SSIU or SSI ? */ 472 /* use SSIU or SSI ? */
@@ -460,8 +474,8 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
460 is_ssi++; 474 is_ssi++;
461 475
462 return (is_from) ? 476 return (is_from) ?
463 dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr : 477 dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
464 dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr; 478 dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
465} 479}
466 480
467static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io, 481static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
@@ -482,7 +496,7 @@ static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
482 return rsnd_gen2_dma_addr(io, mod, is_play, is_from); 496 return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
483} 497}
484 498
485#define MOD_MAX 4 /* MEM/SSI/SRC/DVC */ 499#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
486static void rsnd_dma_of_path(struct rsnd_dma *dma, 500static void rsnd_dma_of_path(struct rsnd_dma *dma,
487 struct rsnd_dai_stream *io, 501 struct rsnd_dai_stream *io,
488 int is_play, 502 int is_play,
@@ -492,55 +506,81 @@ static void rsnd_dma_of_path(struct rsnd_dma *dma,
492 struct rsnd_mod *this = rsnd_dma_to_mod(dma); 506 struct rsnd_mod *this = rsnd_dma_to_mod(dma);
493 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io); 507 struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
494 struct rsnd_mod *src = rsnd_io_to_mod_src(io); 508 struct rsnd_mod *src = rsnd_io_to_mod_src(io);
509 struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
510 struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
495 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io); 511 struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
496 struct rsnd_mod *mod[MOD_MAX]; 512 struct rsnd_mod *mod[MOD_MAX];
497 int i, index; 513 struct rsnd_mod *mod_start, *mod_end;
514 struct rsnd_priv *priv = rsnd_mod_to_priv(this);
515 struct device *dev = rsnd_priv_to_dev(priv);
516 int nr, i;
498 517
518 if (!ssi)
519 return;
499 520
500 for (i = 0; i < MOD_MAX; i++) 521 nr = 0;
522 for (i = 0; i < MOD_MAX; i++) {
501 mod[i] = NULL; 523 mod[i] = NULL;
524 nr += !!rsnd_io_to_mod(io, i);
525 }
502 526
503 /* 527 /*
504 * in play case... 528 * [S] -*-> [E]
529 * [S] -*-> SRC -o-> [E]
530 * [S] -*-> SRC -> DVC -o-> [E]
531 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
505 * 532 *
506 * src -> dst 533 * playback [S] = mem
534 * [E] = SSI
507 * 535 *
508 * mem -> SSI 536 * capture [S] = SSI
509 * mem -> SRC -> SSI 537 * [E] = mem
510 * mem -> SRC -> DVC -> SSI 538 *
539 * -*-> Audio DMAC
540 * -o-> Audio DMAC peri peri
511 */ 541 */
512 mod[0] = NULL; /* for "mem" */ 542 mod_start = (is_play) ? NULL : ssi;
513 index = 1; 543 mod_end = (is_play) ? ssi : NULL;
514 for (i = 1; i < MOD_MAX; i++) {
515 if (!src) {
516 mod[i] = ssi;
517 } else if (!dvc) {
518 mod[i] = src;
519 src = NULL;
520 } else {
521 if ((!is_play) && (this == src))
522 this = dvc;
523 544
524 mod[i] = (is_play) ? src : dvc; 545 mod[0] = mod_start;
525 i++; 546 for (i = 1; i < nr; i++) {
526 mod[i] = (is_play) ? dvc : src; 547 if (src) {
548 mod[i] = src;
527 src = NULL; 549 src = NULL;
550 } else if (ctu) {
551 mod[i] = ctu;
552 ctu = NULL;
553 } else if (mix) {
554 mod[i] = mix;
555 mix = NULL;
556 } else if (dvc) {
557 mod[i] = dvc;
528 dvc = NULL; 558 dvc = NULL;
529 } 559 }
530
531 if (mod[i] == this)
532 index = i;
533
534 if (mod[i] == ssi)
535 break;
536 } 560 }
561 mod[i] = mod_end;
537 562
538 if (is_play) { 563 /*
539 *mod_from = mod[index - 1]; 564 * | SSI | SRC |
540 *mod_to = mod[index]; 565 * -------------+-----+-----+
566 * is_play | o | * |
567 * !is_play | * | o |
568 */
569 if ((this == ssi) == (is_play)) {
570 *mod_from = mod[nr - 1];
571 *mod_to = mod[nr];
541 } else { 572 } else {
542 *mod_from = mod[index]; 573 *mod_from = mod[0];
543 *mod_to = mod[index - 1]; 574 *mod_to = mod[1];
575 }
576
577 dev_dbg(dev, "module connection (this is %s[%d])\n",
578 rsnd_mod_name(this), rsnd_mod_id(this));
579 for (i = 0; i <= nr; i++) {
580 dev_dbg(dev, " %s[%d]%s\n",
581 rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
582 (mod[i] == *mod_from) ? " from" :
583 (mod[i] == *mod_to) ? " to" : "");
544 } 584 }
545} 585}
546 586
@@ -568,10 +608,11 @@ void rsnd_dma_quit(struct rsnd_dai_stream *io, struct rsnd_dma *dma)
568 608
569int rsnd_dma_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id) 609int rsnd_dma_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id)
570{ 610{
571 struct rsnd_mod *mod_from; 611 struct rsnd_mod *mod_from = NULL;
572 struct rsnd_mod *mod_to; 612 struct rsnd_mod *mod_to = NULL;
573 struct rsnd_priv *priv = rsnd_io_to_priv(io); 613 struct rsnd_priv *priv = rsnd_io_to_priv(io);
574 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv); 614 struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
615 struct device *dev = rsnd_priv_to_dev(priv);
575 int is_play = rsnd_io_is_play(io); 616 int is_play = rsnd_io_is_play(io);
576 617
577 /* 618 /*
@@ -598,6 +639,11 @@ int rsnd_dma_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id)
598 if (rsnd_is_gen1(priv)) 639 if (rsnd_is_gen1(priv))
599 dma->ops = &rsnd_dmaen_ops; 640 dma->ops = &rsnd_dmaen_ops;
600 641
642 dev_dbg(dev, "%s %s[%d] -> %s[%d]\n",
643 dma->ops->name,
644 rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
645 rsnd_mod_name(mod_to), rsnd_mod_id(mod_to));
646
601 return dma->ops->init(io, dma, id, mod_from, mod_to); 647 return dma->ops->init(io, dma, id, mod_from, mod_to);
602} 648}
603 649
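
The reworked rsnd_gen2_dma_addr()/rsnd_dma_of_path() above stop special-casing SRC and DVC: the address table now keys on "any CMD user" (DVC, MIX or CTU) instead of DVC alone, memory and the SSI are the fixed endpoints, every module present on the stream is counted, and SRC, CTU, MIX and DVC are slotted in between in that order; mod_from/mod_to are then taken from whichever end of the chain the DMA's own module sits on for the current direction. A minimal standalone sketch of that ordering logic (plain strings stand in for struct rsnd_mod, so everything here is illustrative, not the driver's API):

#include <stdio.h>

int main(void)
{
	/* which modules exist on this stream (NULL = not present) */
	const char *ssi = "ssi", *src = "src", *ctu = NULL, *mix = NULL, *dvc = "dvc";
	const char *mod[6] = { NULL };
	int is_play = 1;
	int nr = !!ssi + !!src + !!ctu + !!mix + !!dvc;	/* hw modules on the stream */
	int i;

	/* playback: mem -> ... -> SSI, capture: SSI -> ... -> mem (NULL = memory) */
	mod[0] = is_play ? NULL : ssi;
	for (i = 1; i < nr; i++) {
		if (src)      { mod[i] = src; src = NULL; }
		else if (ctu) { mod[i] = ctu; ctu = NULL; }
		else if (mix) { mod[i] = mix; mix = NULL; }
		else if (dvc) { mod[i] = dvc; dvc = NULL; }
	}
	mod[i] = is_play ? ssi : NULL;

	for (i = 0; i <= nr; i++)	/* prints: mem src dvc ssi */
		printf("%s%s", mod[i] ? mod[i] : "mem", i == nr ? "\n" : " ");
	return 0;
}
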
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 36fc020cbc18..57796387d482 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -24,6 +24,7 @@ struct rsnd_dvc {
24 struct rsnd_kctrl_cfg_s rdown; /* Ramp Rate Down */ 24 struct rsnd_kctrl_cfg_s rdown; /* Ramp Rate Down */
25}; 25};
26 26
27#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
27#define rsnd_dvc_of_node(priv) \ 28#define rsnd_dvc_of_node(priv) \
28 of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc") 29 of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
29 30
@@ -63,6 +64,19 @@ static const char * const dvc_ramp_rate[] = {
63 "0.125 dB/8192 steps", /* 10111 */ 64 "0.125 dB/8192 steps", /* 10111 */
64}; 65};
65 66
67static void rsnd_dvc_soft_reset(struct rsnd_mod *mod)
68{
69 rsnd_mod_write(mod, DVC_SWRSR, 0);
70 rsnd_mod_write(mod, DVC_SWRSR, 1);
71}
72
73#define rsnd_dvc_initialize_lock(mod) __rsnd_dvc_initialize_lock(mod, 1)
74#define rsnd_dvc_initialize_unlock(mod) __rsnd_dvc_initialize_lock(mod, 0)
75static void __rsnd_dvc_initialize_lock(struct rsnd_mod *mod, u32 enable)
76{
77 rsnd_mod_write(mod, DVC_DVUIR, enable);
78}
79
66static void rsnd_dvc_volume_update(struct rsnd_dai_stream *io, 80static void rsnd_dvc_volume_update(struct rsnd_dai_stream *io,
67 struct rsnd_mod *mod) 81 struct rsnd_mod *mod)
68{ 82{
@@ -135,49 +149,24 @@ static int rsnd_dvc_remove_gen2(struct rsnd_mod *mod,
135 return 0; 149 return 0;
136} 150}
137 151
138static int rsnd_dvc_init(struct rsnd_mod *dvc_mod, 152static int rsnd_dvc_init(struct rsnd_mod *mod,
139 struct rsnd_dai_stream *io, 153 struct rsnd_dai_stream *io,
140 struct rsnd_priv *priv) 154 struct rsnd_priv *priv)
141{ 155{
142 struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io); 156 rsnd_mod_hw_start(mod);
143 struct device *dev = rsnd_priv_to_dev(priv);
144 int dvc_id = rsnd_mod_id(dvc_mod);
145 int src_id = rsnd_mod_id(src_mod);
146 u32 route[] = {
147 [0] = 0x30000,
148 [1] = 0x30001,
149 [2] = 0x40000,
150 [3] = 0x10000,
151 [4] = 0x20000,
152 [5] = 0x40100
153 };
154
155 if (src_id >= ARRAY_SIZE(route)) {
156 dev_err(dev, "DVC%d isn't connected to SRC%d\n", dvc_id, src_id);
157 return -EINVAL;
158 }
159
160 rsnd_mod_hw_start(dvc_mod);
161 157
162 /* 158 rsnd_dvc_soft_reset(mod);
163 * fixme
164 * it doesn't support CTU/MIX
165 */
166 rsnd_mod_write(dvc_mod, CMD_ROUTE_SLCT, route[src_id]);
167 159
168 rsnd_mod_write(dvc_mod, DVC_SWRSR, 0); 160 rsnd_dvc_initialize_lock(mod);
169 rsnd_mod_write(dvc_mod, DVC_SWRSR, 1);
170 161
171 rsnd_mod_write(dvc_mod, DVC_DVUIR, 1); 162 rsnd_path_parse(priv, io);
172 163
173 rsnd_mod_write(dvc_mod, DVC_ADINR, rsnd_get_adinr(dvc_mod, io)); 164 rsnd_mod_write(mod, DVC_ADINR, rsnd_get_adinr_bit(mod, io));
174 165
175 /* ch0/ch1 Volume */ 166 /* ch0/ch1 Volume */
176 rsnd_dvc_volume_update(io, dvc_mod); 167 rsnd_dvc_volume_update(io, mod);
177 168
178 rsnd_mod_write(dvc_mod, DVC_DVUIR, 0); 169 rsnd_adg_set_cmd_timsel_gen2(mod, io);
179
180 rsnd_adg_set_cmd_timsel_gen2(dvc_mod, io);
181 170
182 return 0; 171 return 0;
183} 172}
@@ -195,6 +184,8 @@ static int rsnd_dvc_start(struct rsnd_mod *mod,
195 struct rsnd_dai_stream *io, 184 struct rsnd_dai_stream *io,
196 struct rsnd_priv *priv) 185 struct rsnd_priv *priv)
197{ 186{
187 rsnd_dvc_initialize_unlock(mod);
188
198 rsnd_mod_write(mod, CMD_CTRL, 0x10); 189 rsnd_mod_write(mod, CMD_CTRL, 0x10);
199 190
200 return 0; 191 return 0;
@@ -341,23 +332,21 @@ int rsnd_dvc_probe(struct platform_device *pdev,
341 char name[RSND_DVC_NAME_SIZE]; 332 char name[RSND_DVC_NAME_SIZE];
342 int i, nr, ret; 333 int i, nr, ret;
343 334
344 rsnd_of_parse_dvc(pdev, of_data, priv);
345
346 nr = info->dvc_info_nr;
347 if (!nr)
348 return 0;
349
350 /* This driver doesn't support Gen1 at this point */ 335 /* This driver doesn't support Gen1 at this point */
351 if (rsnd_is_gen1(priv)) { 336 if (rsnd_is_gen1(priv)) {
352 dev_warn(dev, "CMD is not supported on Gen1\n"); 337 dev_warn(dev, "CMD is not supported on Gen1\n");
353 return -EINVAL; 338 return -EINVAL;
354 } 339 }
355 340
341 rsnd_of_parse_dvc(pdev, of_data, priv);
342
343 nr = info->dvc_info_nr;
344 if (!nr)
345 return 0;
346
356 dvc = devm_kzalloc(dev, sizeof(*dvc) * nr, GFP_KERNEL); 347 dvc = devm_kzalloc(dev, sizeof(*dvc) * nr, GFP_KERNEL);
357 if (!dvc) { 348 if (!dvc)
358 dev_err(dev, "CMD allocate failed\n");
359 return -ENOMEM; 349 return -ENOMEM;
360 }
361 350
362 priv->dvc_nr = nr; 351 priv->dvc_nr = nr;
363 priv->dvc = dvc; 352 priv->dvc = dvc;
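
The dvc.c changes above drop the per-SRC CMD_ROUTE_SLCT table (routing is now derived through rsnd_path_parse()) and split bring-up into an explicit soft reset plus an initialize lock that is only released in .start. A rough sketch of that register sequence, using a logging stand-in for rsnd_mod_write() (the register names come from the diff, the values and helper names here are illustrative):

#include <stdio.h>

/* stand-in for rsnd_mod_write(): just log the access */
static void reg_write(const char *reg, unsigned int val)
{
	printf("w %-10s <= 0x%x\n", reg, val);
}

/* soft reset: pulse SWRSR low, then high */
static void dvc_soft_reset(void)
{
	reg_write("DVC_SWRSR", 0);
	reg_write("DVC_SWRSR", 1);
}

/* DVUIR=1 holds the module in its initialize state, DVUIR=0 releases it */
static void dvc_initialize_lock(unsigned int lock)
{
	reg_write("DVC_DVUIR", lock);
}

int main(void)
{
	/* .init: reset, lock, then program the module while it is locked */
	dvc_soft_reset();
	dvc_initialize_lock(1);
	reg_write("DVC_ADINR", 0x2);	/* channel setup; the driver derives this via rsnd_get_adinr_bit() */

	/* .start: release the lock, then kick the CMD unit */
	dvc_initialize_lock(0);
	reg_write("CMD_CTRL", 0x10);
	return 0;
}
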
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 8c7dc51b1c4f..f04d17bc6e3d 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -103,6 +103,22 @@ void rsnd_write(struct rsnd_priv *priv,
103 regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data); 103 regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
104} 104}
105 105
106void rsnd_force_write(struct rsnd_priv *priv,
107 struct rsnd_mod *mod,
108 enum rsnd_reg reg, u32 data)
109{
110 struct device *dev = rsnd_priv_to_dev(priv);
111 struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
112
113 if (!rsnd_is_accessible_reg(priv, gen, reg))
114 return;
115
116 dev_dbg(dev, "w %s[%d] - %4d : %08x\n",
117 rsnd_mod_name(mod), rsnd_mod_id(mod), reg, data);
118
119 regmap_fields_force_write(gen->regs[reg], rsnd_mod_id(mod), data);
120}
121
106void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, 122void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
107 enum rsnd_reg reg, u32 mask, u32 data) 123 enum rsnd_reg reg, u32 mask, u32 data)
108{ 124{
@@ -200,12 +216,13 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
200 /* FIXME: it needs SSI_MODE2/3 in the future */ 216 /* FIXME: it needs SSI_MODE2/3 in the future */
201 RSND_GEN_M_REG(SSI_BUSIF_MODE, 0x0, 0x80), 217 RSND_GEN_M_REG(SSI_BUSIF_MODE, 0x0, 0x80),
202 RSND_GEN_M_REG(SSI_BUSIF_ADINR, 0x4, 0x80), 218 RSND_GEN_M_REG(SSI_BUSIF_ADINR, 0x4, 0x80),
203 RSND_GEN_M_REG(BUSIF_DALIGN, 0x8, 0x80), 219 RSND_GEN_M_REG(SSI_BUSIF_DALIGN,0x8, 0x80),
204 RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80), 220 RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
205 RSND_GEN_M_REG(INT_ENABLE, 0x18, 0x80), 221 RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
206 }; 222 };
207 struct rsnd_regmap_field_conf conf_scu[] = { 223 struct rsnd_regmap_field_conf conf_scu[] = {
208 RSND_GEN_M_REG(SRC_BUSIF_MODE, 0x0, 0x20), 224 RSND_GEN_M_REG(SRC_BUSIF_MODE, 0x0, 0x20),
225 RSND_GEN_M_REG(SRC_BUSIF_DALIGN,0x8, 0x20),
209 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20), 226 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20),
210 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20), 227 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20),
211 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20), 228 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20),
@@ -223,6 +240,18 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
223 RSND_GEN_M_REG(SRC_SRCCR, 0x224, 0x40), 240 RSND_GEN_M_REG(SRC_SRCCR, 0x224, 0x40),
224 RSND_GEN_M_REG(SRC_BSDSR, 0x22c, 0x40), 241 RSND_GEN_M_REG(SRC_BSDSR, 0x22c, 0x40),
225 RSND_GEN_M_REG(SRC_BSISR, 0x238, 0x40), 242 RSND_GEN_M_REG(SRC_BSISR, 0x238, 0x40),
243 RSND_GEN_M_REG(CTU_CTUIR, 0x504, 0x100),
244 RSND_GEN_M_REG(CTU_ADINR, 0x508, 0x100),
245 RSND_GEN_M_REG(MIX_SWRSR, 0xd00, 0x40),
246 RSND_GEN_M_REG(MIX_MIXIR, 0xd04, 0x40),
247 RSND_GEN_M_REG(MIX_ADINR, 0xd08, 0x40),
248 RSND_GEN_M_REG(MIX_MIXMR, 0xd10, 0x40),
249 RSND_GEN_M_REG(MIX_MVPDR, 0xd14, 0x40),
250 RSND_GEN_M_REG(MIX_MDBAR, 0xd18, 0x40),
251 RSND_GEN_M_REG(MIX_MDBBR, 0xd1c, 0x40),
252 RSND_GEN_M_REG(MIX_MDBCR, 0xd20, 0x40),
253 RSND_GEN_M_REG(MIX_MDBDR, 0xd24, 0x40),
254 RSND_GEN_M_REG(MIX_MDBER, 0xd28, 0x40),
226 RSND_GEN_M_REG(DVC_SWRSR, 0xe00, 0x100), 255 RSND_GEN_M_REG(DVC_SWRSR, 0xe00, 0x100),
227 RSND_GEN_M_REG(DVC_DVUIR, 0xe04, 0x100), 256 RSND_GEN_M_REG(DVC_DVUIR, 0xe04, 0x100),
228 RSND_GEN_M_REG(DVC_ADINR, 0xe08, 0x100), 257 RSND_GEN_M_REG(DVC_ADINR, 0xe08, 0x100),
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
new file mode 100644
index 000000000000..0d5c102db6f5
--- /dev/null
+++ b/sound/soc/sh/rcar/mix.c
@@ -0,0 +1,200 @@
1/*
2 * mix.c
3 *
4 * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include "rsnd.h"
11
12#define MIX_NAME_SIZE 16
13#define MIX_NAME "mix"
14
15struct rsnd_mix {
16 struct rsnd_mix_platform_info *info; /* rcar_snd.h */
17 struct rsnd_mod mod;
18};
19
20#define rsnd_mix_nr(priv) ((priv)->mix_nr)
21#define for_each_rsnd_mix(pos, priv, i) \
22 for ((i) = 0; \
23 ((i) < rsnd_mix_nr(priv)) && \
24 ((pos) = (struct rsnd_mix *)(priv)->mix + i); \
25 i++)
26
27
28static void rsnd_mix_soft_reset(struct rsnd_mod *mod)
29{
30 rsnd_mod_write(mod, MIX_SWRSR, 0);
31 rsnd_mod_write(mod, MIX_SWRSR, 1);
32}
33
34#define rsnd_mix_initialize_lock(mod) __rsnd_mix_initialize_lock(mod, 1)
35#define rsnd_mix_initialize_unlock(mod) __rsnd_mix_initialize_lock(mod, 0)
36static void __rsnd_mix_initialize_lock(struct rsnd_mod *mod, u32 enable)
37{
38 rsnd_mod_write(mod, MIX_MIXIR, enable);
39}
40
41static void rsnd_mix_volume_update(struct rsnd_dai_stream *io,
42 struct rsnd_mod *mod)
43{
44
45 /* Disable MIX dB setting */
46 rsnd_mod_write(mod, MIX_MDBER, 0);
47
48 rsnd_mod_write(mod, MIX_MDBAR, 0);
49 rsnd_mod_write(mod, MIX_MDBBR, 0);
50 rsnd_mod_write(mod, MIX_MDBCR, 0);
51 rsnd_mod_write(mod, MIX_MDBDR, 0);
52
53 /* Enable MIX dB setting */
54 rsnd_mod_write(mod, MIX_MDBER, 1);
55}
56
57static int rsnd_mix_init(struct rsnd_mod *mod,
58 struct rsnd_dai_stream *io,
59 struct rsnd_priv *priv)
60{
61 rsnd_mod_hw_start(mod);
62
63 rsnd_mix_soft_reset(mod);
64
65 rsnd_mix_initialize_lock(mod);
66
67 rsnd_mod_write(mod, MIX_ADINR, rsnd_get_adinr_chan(mod, io));
68
69 rsnd_path_parse(priv, io);
70
71 /* volume step */
72 rsnd_mod_write(mod, MIX_MIXMR, 0);
73 rsnd_mod_write(mod, MIX_MVPDR, 0);
74
75 rsnd_mix_volume_update(io, mod);
76
77 rsnd_mix_initialize_unlock(mod);
78
79 return 0;
80}
81
82static int rsnd_mix_quit(struct rsnd_mod *mod,
83 struct rsnd_dai_stream *io,
84 struct rsnd_priv *priv)
85{
86 rsnd_mod_hw_stop(mod);
87
88 return 0;
89}
90
91static struct rsnd_mod_ops rsnd_mix_ops = {
92 .name = MIX_NAME,
93 .init = rsnd_mix_init,
94 .quit = rsnd_mix_quit,
95};
96
97struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
98{
99 if (WARN_ON(id < 0 || id >= rsnd_mix_nr(priv)))
100 id = 0;
101
102 return &((struct rsnd_mix *)(priv->mix) + id)->mod;
103}
104
105static void rsnd_of_parse_mix(struct platform_device *pdev,
106 const struct rsnd_of_data *of_data,
107 struct rsnd_priv *priv)
108{
109 struct device_node *node;
110 struct rsnd_mix_platform_info *mix_info;
111 struct rcar_snd_info *info = rsnd_priv_to_info(priv);
112 struct device *dev = &pdev->dev;
113 int nr;
114
115 if (!of_data)
116 return;
117
118 node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
119 if (!node)
120 return;
121
122 nr = of_get_child_count(node);
123 if (!nr)
124 goto rsnd_of_parse_mix_end;
125
126 mix_info = devm_kzalloc(dev,
127 sizeof(struct rsnd_mix_platform_info) * nr,
128 GFP_KERNEL);
129 if (!mix_info) {
130 dev_err(dev, "mix info allocation error\n");
131 goto rsnd_of_parse_mix_end;
132 }
133
134 info->mix_info = mix_info;
135 info->mix_info_nr = nr;
136
137rsnd_of_parse_mix_end:
138 of_node_put(node);
139
140}
141
142int rsnd_mix_probe(struct platform_device *pdev,
143 const struct rsnd_of_data *of_data,
144 struct rsnd_priv *priv)
145{
146 struct rcar_snd_info *info = rsnd_priv_to_info(priv);
147 struct device *dev = rsnd_priv_to_dev(priv);
148 struct rsnd_mix *mix;
149 struct clk *clk;
150 char name[MIX_NAME_SIZE];
151 int i, nr, ret;
152
153 /* This driver doesn't support Gen1 at this point */
154 if (rsnd_is_gen1(priv)) {
155 dev_warn(dev, "MIX is not supported on Gen1\n");
156 return -EINVAL;
157 }
158
159 rsnd_of_parse_mix(pdev, of_data, priv);
160
161 nr = info->mix_info_nr;
162 if (!nr)
163 return 0;
164
165 mix = devm_kzalloc(dev, sizeof(*mix) * nr, GFP_KERNEL);
166 if (!mix)
167 return -ENOMEM;
168
169 priv->mix_nr = nr;
170 priv->mix = mix;
171
172 for_each_rsnd_mix(mix, priv, i) {
173 snprintf(name, MIX_NAME_SIZE, "%s.%d",
174 MIX_NAME, i);
175
176 clk = devm_clk_get(dev, name);
177 if (IS_ERR(clk))
178 return PTR_ERR(clk);
179
180 mix->info = &info->mix_info[i];
181
182 ret = rsnd_mod_init(priv, &mix->mod, &rsnd_mix_ops,
183 clk, RSND_MOD_MIX, i);
184 if (ret)
185 return ret;
186 }
187
188 return 0;
189}
190
191void rsnd_mix_remove(struct platform_device *pdev,
192 struct rsnd_priv *priv)
193{
194 struct rsnd_mix *mix;
195 int i;
196
197 for_each_rsnd_mix(mix, priv, i) {
198 rsnd_mod_quit(&mix->mod);
199 }
200}
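
The new mix.c mirrors the src.c/dvc.c layout, including a for_each_rsnd_mix() walker over the void *priv->mix array. A tiny standalone sketch of that iterator pattern (simplified struct names, not the driver's types):

#include <stdio.h>

struct mix { int id; };
struct priv { void *mix; int mix_nr; };

#define mix_nr(p)	((p)->mix_nr)
#define for_each_mix(pos, p, i)					\
	for ((i) = 0;						\
	     ((i) < mix_nr(p)) &&				\
	     ((pos) = (struct mix *)(p)->mix + (i));		\
	     (i)++)

int main(void)
{
	struct mix array[3] = { { 0 }, { 1 }, { 2 } };
	struct priv priv = { .mix = array, .mix_nr = 3 };
	struct mix *m;
	int i;

	/* walks the void * backed array the same way for_each_rsnd_mix() does */
	for_each_mix(m, &priv, i)
		printf("mix.%d\n", m->id);
	return 0;
}
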
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 09fcc54a8ee0..7a0e52b4640a 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -47,6 +47,18 @@ enum rsnd_reg {
47 RSND_REG_SCU_SYS_STATUS0, 47 RSND_REG_SCU_SYS_STATUS0,
48 RSND_REG_SCU_SYS_INT_EN0, 48 RSND_REG_SCU_SYS_INT_EN0,
49 RSND_REG_CMD_ROUTE_SLCT, 49 RSND_REG_CMD_ROUTE_SLCT,
50 RSND_REG_CTU_CTUIR,
51 RSND_REG_CTU_ADINR,
52 RSND_REG_MIX_SWRSR,
53 RSND_REG_MIX_MIXIR,
54 RSND_REG_MIX_ADINR,
55 RSND_REG_MIX_MIXMR,
56 RSND_REG_MIX_MVPDR,
57 RSND_REG_MIX_MDBAR,
58 RSND_REG_MIX_MDBBR,
59 RSND_REG_MIX_MDBCR,
60 RSND_REG_MIX_MDBDR,
61 RSND_REG_MIX_MDBER,
50 RSND_REG_DVC_SWRSR, 62 RSND_REG_DVC_SWRSR,
51 RSND_REG_DVC_DVUIR, 63 RSND_REG_DVC_DVUIR,
52 RSND_REG_DVC_ADINR, 64 RSND_REG_DVC_ADINR,
@@ -99,6 +111,7 @@ enum rsnd_reg {
99 RSND_REG_SHARE26, 111 RSND_REG_SHARE26,
100 RSND_REG_SHARE27, 112 RSND_REG_SHARE27,
101 RSND_REG_SHARE28, 113 RSND_REG_SHARE28,
114 RSND_REG_SHARE29,
102 115
103 RSND_REG_MAX, 116 RSND_REG_MAX,
104}; 117};
@@ -119,7 +132,7 @@ enum rsnd_reg {
119#define RSND_REG_SSI_CTRL RSND_REG_SHARE02 132#define RSND_REG_SSI_CTRL RSND_REG_SHARE02
120#define RSND_REG_SSI_BUSIF_MODE RSND_REG_SHARE03 133#define RSND_REG_SSI_BUSIF_MODE RSND_REG_SHARE03
121#define RSND_REG_SSI_BUSIF_ADINR RSND_REG_SHARE04 134#define RSND_REG_SSI_BUSIF_ADINR RSND_REG_SHARE04
122#define RSND_REG_INT_ENABLE RSND_REG_SHARE05 135#define RSND_REG_SSI_INT_ENABLE RSND_REG_SHARE05
123#define RSND_REG_SRC_BSDSR RSND_REG_SHARE06 136#define RSND_REG_SRC_BSDSR RSND_REG_SHARE06
124#define RSND_REG_SRC_BSISR RSND_REG_SHARE07 137#define RSND_REG_SRC_BSISR RSND_REG_SHARE07
125#define RSND_REG_DIV_EN RSND_REG_SHARE08 138#define RSND_REG_DIV_EN RSND_REG_SHARE08
@@ -136,13 +149,14 @@ enum rsnd_reg {
136#define RSND_REG_AUDIO_CLK_SEL2 RSND_REG_SHARE19 149#define RSND_REG_AUDIO_CLK_SEL2 RSND_REG_SHARE19
137#define RSND_REG_CMD_CTRL RSND_REG_SHARE20 150#define RSND_REG_CMD_CTRL RSND_REG_SHARE20
138#define RSND_REG_CMDOUT_TIMSEL RSND_REG_SHARE21 151#define RSND_REG_CMDOUT_TIMSEL RSND_REG_SHARE21
139#define RSND_REG_BUSIF_DALIGN RSND_REG_SHARE22 152#define RSND_REG_SSI_BUSIF_DALIGN RSND_REG_SHARE22
140#define RSND_REG_DVC_VRCTR RSND_REG_SHARE23 153#define RSND_REG_DVC_VRCTR RSND_REG_SHARE23
141#define RSND_REG_DVC_VRPDR RSND_REG_SHARE24 154#define RSND_REG_DVC_VRPDR RSND_REG_SHARE24
142#define RSND_REG_DVC_VRDBR RSND_REG_SHARE25 155#define RSND_REG_DVC_VRDBR RSND_REG_SHARE25
143#define RSND_REG_SCU_SYS_STATUS1 RSND_REG_SHARE26 156#define RSND_REG_SCU_SYS_STATUS1 RSND_REG_SHARE26
144#define RSND_REG_SCU_SYS_INT_EN1 RSND_REG_SHARE27 157#define RSND_REG_SCU_SYS_INT_EN1 RSND_REG_SHARE27
145#define RSND_REG_SRC_INT_ENABLE0 RSND_REG_SHARE28 158#define RSND_REG_SRC_INT_ENABLE0 RSND_REG_SHARE28
159#define RSND_REG_SRC_BUSIF_DALIGN RSND_REG_SHARE29
146 160
147struct rsnd_of_data; 161struct rsnd_of_data;
148struct rsnd_priv; 162struct rsnd_priv;
@@ -157,27 +171,28 @@ struct rsnd_dai_stream;
157 rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r) 171 rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
158#define rsnd_mod_write(m, r, d) \ 172#define rsnd_mod_write(m, r, d) \
159 rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d) 173 rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
174#define rsnd_mod_force_write(m, r, d) \
175 rsnd_force_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
160#define rsnd_mod_bset(m, r, s, d) \ 176#define rsnd_mod_bset(m, r, s, d) \
161 rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d) 177 rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
162 178
163u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg); 179u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
164void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod, 180void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
165 enum rsnd_reg reg, u32 data); 181 enum rsnd_reg reg, u32 data);
182void rsnd_force_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
183 enum rsnd_reg reg, u32 data);
166void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg, 184void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
167 u32 mask, u32 data); 185 u32 mask, u32 data);
168u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 186u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
187u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
188u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
189void rsnd_path_parse(struct rsnd_priv *priv,
190 struct rsnd_dai_stream *io);
169 191
170/* 192/*
171 * R-Car DMA 193 * R-Car DMA
172 */ 194 */
173struct rsnd_dma; 195struct rsnd_dma;
174struct rsnd_dma_ops {
175 void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
176 void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
177 int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
178 struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
179 void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
180};
181 196
182struct rsnd_dmaen { 197struct rsnd_dmaen {
183 struct dma_chan *chan; 198 struct dma_chan *chan;
@@ -217,6 +232,8 @@ struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
217 */ 232 */
218enum rsnd_mod_type { 233enum rsnd_mod_type {
219 RSND_MOD_DVC = 0, 234 RSND_MOD_DVC = 0,
235 RSND_MOD_MIX,
236 RSND_MOD_CTU,
220 RSND_MOD_SRC, 237 RSND_MOD_SRC,
221 RSND_MOD_SSI, 238 RSND_MOD_SSI,
222 RSND_MOD_MAX, 239 RSND_MOD_MAX,
@@ -312,7 +329,7 @@ struct rsnd_mod {
312 329
313#define rsnd_mod_to_priv(mod) ((mod)->priv) 330#define rsnd_mod_to_priv(mod) ((mod)->priv)
314#define rsnd_mod_to_dma(mod) (&(mod)->dma) 331#define rsnd_mod_to_dma(mod) (&(mod)->dma)
315#define rsnd_mod_id(mod) ((mod)->id) 332#define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
316#define rsnd_mod_hw_start(mod) clk_enable((mod)->clk) 333#define rsnd_mod_hw_start(mod) clk_enable((mod)->clk)
317#define rsnd_mod_hw_stop(mod) clk_disable((mod)->clk) 334#define rsnd_mod_hw_stop(mod) clk_disable((mod)->clk)
318 335
@@ -345,9 +362,12 @@ struct rsnd_dai_stream {
345 int byte_per_period; 362 int byte_per_period;
346 int next_period_byte; 363 int next_period_byte;
347}; 364};
348#define rsnd_io_to_mod_ssi(io) ((io)->mod[RSND_MOD_SSI]) 365#define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
349#define rsnd_io_to_mod_src(io) ((io)->mod[RSND_MOD_SRC]) 366#define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI)
350#define rsnd_io_to_mod_dvc(io) ((io)->mod[RSND_MOD_DVC]) 367#define rsnd_io_to_mod_src(io) rsnd_io_to_mod((io), RSND_MOD_SRC)
368#define rsnd_io_to_mod_ctu(io) rsnd_io_to_mod((io), RSND_MOD_CTU)
369#define rsnd_io_to_mod_mix(io) rsnd_io_to_mod((io), RSND_MOD_MIX)
370#define rsnd_io_to_mod_dvc(io) rsnd_io_to_mod((io), RSND_MOD_DVC)
351#define rsnd_io_to_rdai(io) ((io)->rdai) 371#define rsnd_io_to_rdai(io) ((io)->rdai)
352#define rsnd_io_to_priv(io) (rsnd_rdai_to_priv(rsnd_io_to_rdai(io))) 372#define rsnd_io_to_priv(io) (rsnd_rdai_to_priv(rsnd_io_to_rdai(io)))
353#define rsnd_io_is_play(io) (&rsnd_io_to_rdai(io)->playback == io) 373#define rsnd_io_is_play(io) (&rsnd_io_to_rdai(io)->playback == io)
@@ -437,12 +457,6 @@ struct rsnd_priv {
437 void *gen; 457 void *gen;
438 458
439 /* 459 /*
440 * below value will be filled on rsnd_src_probe()
441 */
442 void *src;
443 int src_nr;
444
445 /*
446 * below value will be filled on rsnd_adg_probe() 460 * below value will be filled on rsnd_adg_probe()
447 */ 461 */
448 void *adg; 462 void *adg;
@@ -459,6 +473,24 @@ struct rsnd_priv {
459 int ssi_nr; 473 int ssi_nr;
460 474
461 /* 475 /*
476 * below value will be filled on rsnd_src_probe()
477 */
478 void *src;
479 int src_nr;
480
481 /*
482 * below value will be filled on rsnd_ctu_probe()
483 */
484 void *ctu;
485 int ctu_nr;
486
487 /*
488 * below value will be filled on rsnd_mix_probe()
489 */
490 void *mix;
491 int mix_nr;
492
493 /*
462 * below value will be filled on rsnd_dvc_probe() 494 * below value will be filled on rsnd_dvc_probe()
463 */ 495 */
464 void *dvc; 496 void *dvc;
@@ -531,6 +563,19 @@ int rsnd_kctrl_new_e(struct rsnd_mod *mod,
531 u32 max); 563 u32 max);
532 564
533/* 565/*
566 * R-Car SSI
567 */
568int rsnd_ssi_probe(struct platform_device *pdev,
569 const struct rsnd_of_data *of_data,
570 struct rsnd_priv *priv);
571void rsnd_ssi_remove(struct platform_device *pdev,
572 struct rsnd_priv *priv);
573struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
574int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
575int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
576int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
577
578/*
534 * R-Car SRC 579 * R-Car SRC
535 */ 580 */
536int rsnd_src_probe(struct platform_device *pdev, 581int rsnd_src_probe(struct platform_device *pdev,
@@ -550,20 +595,27 @@ int rsnd_src_ssiu_stop(struct rsnd_mod *ssi_mod,
550int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod); 595int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod);
551int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod); 596int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod);
552 597
553#define rsnd_src_nr(priv) ((priv)->src_nr) 598/*
599 * R-Car CTU
600 */
601int rsnd_ctu_probe(struct platform_device *pdev,
602 const struct rsnd_of_data *of_data,
603 struct rsnd_priv *priv);
604
605void rsnd_ctu_remove(struct platform_device *pdev,
606 struct rsnd_priv *priv);
607struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
554 608
555/* 609/*
556 * R-Car SSI 610 * R-Car MIX
557 */ 611 */
558int rsnd_ssi_probe(struct platform_device *pdev, 612int rsnd_mix_probe(struct platform_device *pdev,
559 const struct rsnd_of_data *of_data, 613 const struct rsnd_of_data *of_data,
560 struct rsnd_priv *priv); 614 struct rsnd_priv *priv);
561void rsnd_ssi_remove(struct platform_device *pdev, 615
616void rsnd_mix_remove(struct platform_device *pdev,
562 struct rsnd_priv *priv); 617 struct rsnd_priv *priv);
563struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id); 618struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
564int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
565int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
566int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
567 619
568/* 620/*
569 * R-Car DVC 621 * R-Car DVC
@@ -575,7 +627,4 @@ void rsnd_dvc_remove(struct platform_device *pdev,
575 struct rsnd_priv *priv); 627 struct rsnd_priv *priv);
576struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id); 628struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
577 629
578#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
579
580
581#endif 630#endif
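
rsnd.h above bounds-checks rsnd_io_to_mod() and lets rsnd_mod_id() accept a NULL module (returning -1), which is what allows the new debug output in core.c to name every slot of the path, including the memory end. A small sketch of that NULL-tolerant accessor idea (illustrative types only):

#include <stdio.h>
#include <stddef.h>

struct mod { int id; };

/* NULL-tolerant accessor in the spirit of the new rsnd_mod_id() */
#define mod_id(m)	((m) ? (m)->id : -1)

int main(void)
{
	struct mod ssi = { 0 };
	struct mod *slot[2] = { &ssi, NULL };	/* e.g. SSI present, DVC absent */
	int i;

	for (i = 0; i < 2; i++)
		printf("slot[%d] id=%d\n", i, mod_id(slot[i]));	/* 0, then -1 */
	return 0;
}
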
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
index 84e935711e29..d61db9c385ea 100644
--- a/sound/soc/sh/rcar/rsrc-card.c
+++ b/sound/soc/sh/rcar/rsrc-card.c
@@ -41,6 +41,7 @@ static const struct rsrc_card_of_data routes_of_ssi0_ak4642 = {
41static const struct of_device_id rsrc_card_of_match[] = { 41static const struct of_device_id rsrc_card_of_match[] = {
42 { .compatible = "renesas,rsrc-card,lager", .data = &routes_of_ssi0_ak4642 }, 42 { .compatible = "renesas,rsrc-card,lager", .data = &routes_of_ssi0_ak4642 },
43 { .compatible = "renesas,rsrc-card,koelsch", .data = &routes_of_ssi0_ak4642 }, 43 { .compatible = "renesas,rsrc-card,koelsch", .data = &routes_of_ssi0_ak4642 },
44 { .compatible = "renesas,rsrc-card", },
44 {}, 45 {},
45}; 46};
46MODULE_DEVICE_TABLE(of, rsrc_card_of_match); 47MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
@@ -242,8 +243,15 @@ static int rsrc_card_parse_links(struct device_node *np,
242 snd_soc_of_get_dai_name(np, &dai_link->codec_dai_name); 243 snd_soc_of_get_dai_name(np, &dai_link->codec_dai_name);
243 244
244 /* additional name prefix */ 245 /* additional name prefix */
245 priv->codec_conf.of_node = dai_link->codec_of_node; 246 if (of_data) {
246 priv->codec_conf.name_prefix = of_data->prefix; 247 priv->codec_conf.of_node = dai_link->codec_of_node;
248 priv->codec_conf.name_prefix = of_data->prefix;
249 } else {
250 snd_soc_of_parse_audio_prefix(&priv->snd_card,
251 &priv->codec_conf,
252 dai_link->codec_of_node,
253 "audio-prefix");
254 }
247 255
248 /* set dai_name */ 256 /* set dai_name */
249 snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s", 257 snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s",
@@ -361,8 +369,14 @@ static int rsrc_card_parse_of(struct device_node *node,
361 priv->snd_card.num_links = num; 369 priv->snd_card.num_links = num;
362 priv->snd_card.codec_conf = &priv->codec_conf; 370 priv->snd_card.codec_conf = &priv->codec_conf;
363 priv->snd_card.num_configs = 1; 371 priv->snd_card.num_configs = 1;
364 priv->snd_card.of_dapm_routes = of_data->routes; 372
365 priv->snd_card.num_of_dapm_routes = of_data->num_routes; 373 if (of_data) {
374 priv->snd_card.of_dapm_routes = of_data->routes;
375 priv->snd_card.num_of_dapm_routes = of_data->num_routes;
376 } else {
377 snd_soc_of_parse_audio_routing(&priv->snd_card,
378 "audio-routing");
379 }
366 380
367 /* Parse the card name from DT */ 381 /* Parse the card name from DT */
368 snd_soc_of_parse_card_name(&priv->snd_card, "card-name"); 382 snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index c61c17180142..89a18e102feb 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -30,6 +30,7 @@ struct rsnd_src {
30 30
31#define RSND_SRC_NAME_SIZE 16 31#define RSND_SRC_NAME_SIZE 16
32 32
33#define rsnd_src_nr(priv) ((priv)->src_nr)
33#define rsnd_enable_sync_convert(src) ((src)->sen.val) 34#define rsnd_enable_sync_convert(src) ((src)->sen.val)
34#define rsnd_src_of_node(priv) \ 35#define rsnd_src_of_node(priv) \
35 of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src") 36 of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
@@ -117,6 +118,20 @@ struct rsnd_src {
117/* 118/*
118 * Gen1/Gen2 common functions 119 * Gen1/Gen2 common functions
119 */ 120 */
121static void rsnd_src_soft_reset(struct rsnd_mod *mod)
122{
123 rsnd_mod_write(mod, SRC_SWRSR, 0);
124 rsnd_mod_write(mod, SRC_SWRSR, 1);
125}
126
127
128#define rsnd_src_initialize_lock(mod) __rsnd_src_initialize_lock(mod, 1)
129#define rsnd_src_initialize_unlock(mod) __rsnd_src_initialize_lock(mod, 0)
130static void __rsnd_src_initialize_lock(struct rsnd_mod *mod, u32 enable)
131{
132 rsnd_mod_write(mod, SRC_SRCIR, enable);
133}
134
120static struct dma_chan *rsnd_src_dma_req(struct rsnd_dai_stream *io, 135static struct dma_chan *rsnd_src_dma_req(struct rsnd_dai_stream *io,
121 struct rsnd_mod *mod) 136 struct rsnd_mod *mod)
122{ 137{
@@ -133,7 +148,6 @@ int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
133 int use_busif) 148 int use_busif)
134{ 149{
135 struct rsnd_dai *rdai = rsnd_io_to_rdai(io); 150 struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
136 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
137 int ssi_id = rsnd_mod_id(ssi_mod); 151 int ssi_id = rsnd_mod_id(ssi_mod);
138 152
139 /* 153 /*
@@ -170,27 +184,14 @@ int rsnd_src_ssiu_start(struct rsnd_mod *ssi_mod,
170 * DMA settings for SSIU 184 * DMA settings for SSIU
171 */ 185 */
172 if (use_busif) { 186 if (use_busif) {
173 u32 val = 0x76543210; 187 u32 val = rsnd_get_dalign(ssi_mod, io);
174 u32 mask = ~0;
175 188
176 rsnd_mod_write(ssi_mod, SSI_BUSIF_ADINR, 189 rsnd_mod_write(ssi_mod, SSI_BUSIF_ADINR,
177 rsnd_get_adinr(ssi_mod, io)); 190 rsnd_get_adinr_bit(ssi_mod, io));
178 rsnd_mod_write(ssi_mod, SSI_BUSIF_MODE, 1); 191 rsnd_mod_write(ssi_mod, SSI_BUSIF_MODE, 1);
179 rsnd_mod_write(ssi_mod, SSI_CTRL, 0x1); 192 rsnd_mod_write(ssi_mod, SSI_CTRL, 0x1);
180 193
181 mask <<= runtime->channels * 4; 194 rsnd_mod_write(ssi_mod, SSI_BUSIF_DALIGN, val);
182 val = val & mask;
183
184 switch (runtime->sample_bits) {
185 case 16:
186 val |= 0x67452301 & ~mask;
187 break;
188 case 32:
189 val |= 0x76543210 & ~mask;
190 break;
191 }
192 rsnd_mod_write(ssi_mod, BUSIF_DALIGN, val);
193
194 } 195 }
195 196
196 return 0; 197 return 0;
@@ -215,10 +216,9 @@ int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod)
215 return 0; 216 return 0;
216 217
217 /* enable SSI interrupt if Gen2 */ 218 /* enable SSI interrupt if Gen2 */
218 if (rsnd_ssi_is_dma_mode(ssi_mod)) 219 rsnd_mod_write(ssi_mod, SSI_INT_ENABLE,
219 rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0e000000); 220 rsnd_ssi_is_dma_mode(ssi_mod) ?
220 else 221 0x0e000000 : 0x0f000000);
221 rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0f000000);
222 222
223 return 0; 223 return 0;
224} 224}
@@ -231,7 +231,7 @@ int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod)
231 return 0; 231 return 0;
232 232
233 /* disable SSI interrupt if Gen2 */ 233 /* disable SSI interrupt if Gen2 */
234 rsnd_mod_write(ssi_mod, INT_ENABLE, 0x00000000); 234 rsnd_mod_write(ssi_mod, SSI_INT_ENABLE, 0x00000000);
235 235
236 return 0; 236 return 0;
237} 237}
@@ -294,12 +294,8 @@ static int rsnd_src_set_convert_rate(struct rsnd_mod *mod,
294 if (convert_rate) 294 if (convert_rate)
295 fsrate = 0x0400000 / convert_rate * runtime->rate; 295 fsrate = 0x0400000 / convert_rate * runtime->rate;
296 296
297 /* set/clear soft reset */
298 rsnd_mod_write(mod, SRC_SWRSR, 0);
299 rsnd_mod_write(mod, SRC_SWRSR, 1);
300
301 /* Set channel number and output bit length */ 297 /* Set channel number and output bit length */
302 rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr(mod, io)); 298 rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr_bit(mod, io));
303 299
304 /* Enable the initial value of IFS */ 300 /* Enable the initial value of IFS */
305 if (fsrate) { 301 if (fsrate) {
@@ -358,17 +354,15 @@ static int rsnd_src_init(struct rsnd_mod *mod,
358 354
359 rsnd_mod_hw_start(mod); 355 rsnd_mod_hw_start(mod);
360 356
357 rsnd_src_soft_reset(mod);
358
359 rsnd_src_initialize_lock(mod);
360
361 src->err = 0; 361 src->err = 0;
362 362
363 /* reset sync convert_rate */ 363 /* reset sync convert_rate */
364 src->sync.val = 0; 364 src->sync.val = 0;
365 365
366 /*
367 * Initialize the operation of the SRC internal circuits
368 * see rsnd_src_start()
369 */
370 rsnd_mod_write(mod, SRC_SRCIR, 1);
371
372 return 0; 366 return 0;
373} 367}
374 368
@@ -395,11 +389,7 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
395 389
396static int rsnd_src_start(struct rsnd_mod *mod) 390static int rsnd_src_start(struct rsnd_mod *mod)
397{ 391{
398 /* 392 rsnd_src_initialize_unlock(mod);
399 * Cancel the initialization and operate the SRC function
400 * see rsnd_src_init()
401 */
402 rsnd_mod_write(mod, SRC_SRCIR, 0);
403 393
404 return 0; 394 return 0;
405} 395}
@@ -617,6 +607,14 @@ static void rsnd_src_irq_ctrol_gen2(struct rsnd_mod *mod, int enable)
617 int_val = 0; 607 int_val = 0;
618 } 608 }
619 609
610 /*
611 * WORKAROUND
612 *
613 * ignore overflow error when rsnd_enable_sync_convert()
614 */
615 if (rsnd_enable_sync_convert(src))
616 sys_int_val = sys_int_val & 0xffff;
617
620 rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val); 618 rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val);
621 rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val); 619 rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val);
622 rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val); 620 rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val);
@@ -632,11 +630,22 @@ static void rsnd_src_error_clear_gen2(struct rsnd_mod *mod)
632 630
633static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod) 631static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod)
634{ 632{
635 u32 val = OUF_SRC(rsnd_mod_id(mod)); 633 struct rsnd_src *src = rsnd_mod_to_src(mod);
634 u32 val0, val1;
636 bool ret = false; 635 bool ret = false;
637 636
638 if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val) || 637 val0 = val1 = OUF_SRC(rsnd_mod_id(mod));
639 (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val)) { 638
639 /*
640 * WORKAROUND
641 *
642 * ignore overflow error when rsnd_enable_sync_convert()
643 */
644 if (rsnd_enable_sync_convert(src))
645 val0 = val0 & 0xffff;
646
647 if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val0) ||
648 (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val1)) {
640 struct rsnd_src *src = rsnd_mod_to_src(mod); 649 struct rsnd_src *src = rsnd_mod_to_src(mod);
641 650
642 src->err++; 651 src->err++;
@@ -652,7 +661,20 @@ static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod)
652static int _rsnd_src_start_gen2(struct rsnd_mod *mod, 661static int _rsnd_src_start_gen2(struct rsnd_mod *mod,
653 struct rsnd_dai_stream *io) 662 struct rsnd_dai_stream *io)
654{ 663{
655 u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11; 664 struct rsnd_src *src = rsnd_mod_to_src(mod);
665 u32 val;
666
667 val = rsnd_get_dalign(mod, io);
668
669 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, val);
670
671 /*
672 * WORKAROUND
673 *
674 * Enable SRC output if you want to use sync convert together with DVC
675 */
676 val = (rsnd_io_to_mod_dvc(io) && !rsnd_enable_sync_convert(src)) ?
677 0x01 : 0x11;
656 678
657 rsnd_mod_write(mod, SRC_CTRL, val); 679 rsnd_mod_write(mod, SRC_CTRL, val);
658 680
@@ -922,13 +944,6 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
922 return 0; 944 return 0;
923 945
924 /* 946 /*
925 * We can't use SRC sync convert
926 * if it has DVC
927 */
928 if (rsnd_io_to_mod_dvc(io))
929 return 0;
930
931 /*
932 * enable sync convert 947 * enable sync convert
933 */ 948 */
934 ret = rsnd_kctrl_new_s(mod, io, rtd, 949 ret = rsnd_kctrl_new_s(mod, io, rtd,
@@ -1047,10 +1062,8 @@ int rsnd_src_probe(struct platform_device *pdev,
1047 return 0; 1062 return 0;
1048 1063
1049 src = devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL); 1064 src = devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL);
1050 if (!src) { 1065 if (!src)
1051 dev_err(dev, "SRC allocate failed\n");
1052 return -ENOMEM; 1066 return -ENOMEM;
1053 }
1054 1067
1055 priv->src_nr = nr; 1068 priv->src_nr = nr;
1056 priv->src = src; 1069 priv->src = src;
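
In src.c the open-coded BUSIF data-alignment table is replaced by the new rsnd_get_dalign() helper (its implementation is not part of this diff). The standalone sketch below simply recomputes the value the removed block used to produce, to show what the helper replaces, not how it is implemented:

#include <stdio.h>

/* DALIGN value as computed by the removed open-coded block */
static unsigned int old_dalign(unsigned int channels, unsigned int sample_bits)
{
	unsigned int val = 0x76543210;
	unsigned int mask = ~0u;

	mask <<= channels * 4;
	val &= mask;

	switch (sample_bits) {
	case 16:
		val |= 0x67452301 & ~mask;	/* byte-swapped pairs for 16-bit samples */
		break;
	case 32:
		val |= 0x76543210 & ~mask;
		break;
	}
	return val;
}

int main(void)
{
	printf("2ch/16bit: %08x\n", old_dalign(2, 16));	/* 76543201 */
	printf("2ch/32bit: %08x\n", old_dalign(2, 32));	/* 76543210 */
	return 0;
}
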
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 2fbe59f7f9b5..d45b9a7e324e 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -770,10 +770,8 @@ int rsnd_ssi_probe(struct platform_device *pdev,
770 */ 770 */
771 nr = info->ssi_info_nr; 771 nr = info->ssi_info_nr;
772 ssi = devm_kzalloc(dev, sizeof(*ssi) * nr, GFP_KERNEL); 772 ssi = devm_kzalloc(dev, sizeof(*ssi) * nr, GFP_KERNEL);
773 if (!ssi) { 773 if (!ssi)
774 dev_err(dev, "SSI allocate failed\n");
775 return -ENOMEM; 774 return -ENOMEM;
776 }
777 775
778 priv->ssi = ssi; 776 priv->ssi = ssi;
779 priv->ssi_nr = nr; 777 priv->ssi_nr = nr;
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index ab13146e4f82..89ed1b107ac5 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -385,14 +385,9 @@ static const struct snd_soc_component_driver sh4_ssi_component = {
385 385
386static int sh4_soc_dai_probe(struct platform_device *pdev) 386static int sh4_soc_dai_probe(struct platform_device *pdev)
387{ 387{
388 return snd_soc_register_component(&pdev->dev, &sh4_ssi_component, 388 return devm_snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
389 sh4_ssi_dai, ARRAY_SIZE(sh4_ssi_dai)); 389 sh4_ssi_dai,
390} 390 ARRAY_SIZE(sh4_ssi_dai));
391
392static int sh4_soc_dai_remove(struct platform_device *pdev)
393{
394 snd_soc_unregister_component(&pdev->dev);
395 return 0;
396} 391}
397 392
398static struct platform_driver sh4_ssi_driver = { 393static struct platform_driver sh4_ssi_driver = {
@@ -401,7 +396,6 @@ static struct platform_driver sh4_ssi_driver = {
401 }, 396 },
402 397
403 .probe = sh4_soc_dai_probe, 398 .probe = sh4_soc_dai_probe,
404 .remove = sh4_soc_dai_remove,
405}; 399};
406 400
407module_platform_driver(sh4_ssi_driver); 401module_platform_driver(sh4_ssi_driver);
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 08d7259bbaab..d40efc9fe0a9 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -85,10 +85,19 @@ EXPORT_SYMBOL(snd_soc_alloc_ac97_codec);
85/** 85/**
86 * snd_soc_new_ac97_codec - initialise AC97 device 86 * snd_soc_new_ac97_codec - initialise AC97 device
87 * @codec: audio codec 87 * @codec: audio codec
88 * @id: The expected device ID
89 * @id_mask: Mask that is applied to the device ID before comparing with @id
88 * 90 *
89 * Initialises AC97 codec resources for use by ad-hoc devices only. 91 * Initialises AC97 codec resources for use by ad-hoc devices only.
92 *
93 * If @id is not 0 this function will reset the device, then read the ID from
94 * the device and check if it matches the expected ID. If it doesn't match an
95 * error will be returned and device will not be registered.
96 *
97 * Returns: A PTR_ERR() on failure or a valid snd_ac97 struct on success.
90 */ 98 */
91struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec) 99struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
100 unsigned int id, unsigned int id_mask)
92{ 101{
93 struct snd_ac97 *ac97; 102 struct snd_ac97 *ac97;
94 int ret; 103 int ret;
@@ -97,13 +106,24 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
97 if (IS_ERR(ac97)) 106 if (IS_ERR(ac97))
98 return ac97; 107 return ac97;
99 108
100 ret = device_add(&ac97->dev); 109 if (id) {
101 if (ret) { 110 ret = snd_ac97_reset(ac97, false, id, id_mask);
102 put_device(&ac97->dev); 111 if (ret < 0) {
103 return ERR_PTR(ret); 112 dev_err(codec->dev, "Failed to reset AC97 device: %d\n",
113 ret);
114 goto err_put_device;
115 }
104 } 116 }
105 117
118 ret = device_add(&ac97->dev);
119 if (ret)
120 goto err_put_device;
121
106 return ac97; 122 return ac97;
123
124err_put_device:
125 put_device(&ac97->dev);
126 return ERR_PTR(ret);
107} 127}
108EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec); 128EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
109 129
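
snd_soc_new_ac97_codec() can now verify the device: when @id is non-zero the codec is reset, its ID is read back, and the masked value must equal @id before the device is added. A hedged sketch of that masked comparison (the IDs below are made up, not real codec IDs):

#include <stdio.h>

/* only the bits covered by id_mask have to match the expected id */
static int ac97_id_matches(unsigned int read_id, unsigned int id, unsigned int id_mask)
{
	return (read_id & id_mask) == id;
}

int main(void)
{
	/* accept any revision of a hypothetical 0x414b4dxx part */
	printf("%d\n", ac97_id_matches(0x414b4d21, 0x414b4d00, 0xffffff00));	/* 1 */
	printf("%d\n", ac97_id_matches(0x41445374, 0x414b4d00, 0xffffff00));	/* 0 */
	return 0;
}
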
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3a4a5c0e3f97..6173d15236c3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -654,10 +654,12 @@ int snd_soc_suspend(struct device *dev)
654 654
655 /* suspend all CODECs */ 655 /* suspend all CODECs */
656 list_for_each_entry(codec, &card->codec_dev_list, card_list) { 656 list_for_each_entry(codec, &card->codec_dev_list, card_list) {
657 struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
658
657 /* If there are paths active then the CODEC will be held with 659 /* If there are paths active then the CODEC will be held with
658 * bias _ON and should not be suspended. */ 660 * bias _ON and should not be suspended. */
659 if (!codec->suspended) { 661 if (!codec->suspended) {
660 switch (codec->dapm.bias_level) { 662 switch (snd_soc_dapm_get_bias_level(dapm)) {
661 case SND_SOC_BIAS_STANDBY: 663 case SND_SOC_BIAS_STANDBY:
662 /* 664 /*
663 * If the CODEC is capable of idle 665 * If the CODEC is capable of idle
@@ -665,7 +667,7 @@ int snd_soc_suspend(struct device *dev)
665 * means it's doing something, 667 * means it's doing something,
666 * otherwise fall through. 668 * otherwise fall through.
667 */ 669 */
668 if (codec->dapm.idle_bias_off) { 670 if (dapm->idle_bias_off) {
669 dev_dbg(codec->dev, 671 dev_dbg(codec->dev,
670 "ASoC: idle_bias_off CODEC on over suspend\n"); 672 "ASoC: idle_bias_off CODEC on over suspend\n");
671 break; 673 break;
@@ -978,7 +980,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
978 980
979static void soc_remove_component(struct snd_soc_component *component) 981static void soc_remove_component(struct snd_soc_component *component)
980{ 982{
981 if (!component->probed) 983 if (!component->card)
982 return; 984 return;
983 985
984 /* This is a HACK and will be removed soon */ 986 /* This is a HACK and will be removed soon */
@@ -991,7 +993,7 @@ static void soc_remove_component(struct snd_soc_component *component)
991 snd_soc_dapm_free(snd_soc_component_get_dapm(component)); 993 snd_soc_dapm_free(snd_soc_component_get_dapm(component));
992 994
993 soc_cleanup_component_debugfs(component); 995 soc_cleanup_component_debugfs(component);
994 component->probed = 0; 996 component->card = NULL;
995 module_put(component->dev->driver->owner); 997 module_put(component->dev->driver->owner);
996} 998}
997 999
@@ -1102,16 +1104,26 @@ static int soc_probe_component(struct snd_soc_card *card,
1102 struct snd_soc_dai *dai; 1104 struct snd_soc_dai *dai;
1103 int ret; 1105 int ret;
1104 1106
1105 if (component->probed) 1107 if (!strcmp(component->name, "snd-soc-dummy"))
1106 return 0; 1108 return 0;
1107 1109
1108 component->card = card; 1110 if (component->card) {
1109 dapm->card = card; 1111 if (component->card != card) {
1110 soc_set_name_prefix(card, component); 1112 dev_err(component->dev,
1113 "Trying to bind component to card \"%s\" but is already bound to card \"%s\"\n",
1114 card->name, component->card->name);
1115 return -ENODEV;
1116 }
1117 return 0;
1118 }
1111 1119
1112 if (!try_module_get(component->dev->driver->owner)) 1120 if (!try_module_get(component->dev->driver->owner))
1113 return -ENODEV; 1121 return -ENODEV;
1114 1122
1123 component->card = card;
1124 dapm->card = card;
1125 soc_set_name_prefix(card, component);
1126
1115 soc_init_component_debugfs(component); 1127 soc_init_component_debugfs(component);
1116 1128
1117 if (component->dapm_widgets) { 1129 if (component->dapm_widgets) {
@@ -1155,7 +1167,6 @@ static int soc_probe_component(struct snd_soc_card *card,
1155 snd_soc_dapm_add_routes(dapm, component->dapm_routes, 1167 snd_soc_dapm_add_routes(dapm, component->dapm_routes,
1156 component->num_dapm_routes); 1168 component->num_dapm_routes);
1157 1169
1158 component->probed = 1;
1159 list_add(&dapm->list, &card->dapm_list); 1170 list_add(&dapm->list, &card->dapm_list);
1160 1171
1161 /* This is a HACK and will be removed soon */ 1172 /* This is a HACK and will be removed soon */
@@ -1166,6 +1177,7 @@ static int soc_probe_component(struct snd_soc_card *card,
1166 1177
1167err_probe: 1178err_probe:
1168 soc_cleanup_component_debugfs(component); 1179 soc_cleanup_component_debugfs(component);
1180 component->card = NULL;
1169 module_put(component->dev->driver->owner); 1181 module_put(component->dev->driver->owner);
1170 1182
1171 return ret; 1183 return ret;
@@ -1449,7 +1461,7 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
1449 rtd->dev_registered = 0; 1461 rtd->dev_registered = 0;
1450 } 1462 }
1451 1463
1452 if (component && component->probed) 1464 if (component)
1453 soc_remove_component(component); 1465 soc_remove_component(component);
1454} 1466}
1455 1467
@@ -1716,6 +1728,7 @@ card_probe_error:
1716 if (card->remove) 1728 if (card->remove)
1717 card->remove(card); 1729 card->remove(card);
1718 1730
1731 snd_soc_dapm_free(&card->dapm);
1719 soc_cleanup_card_debugfs(card); 1732 soc_cleanup_card_debugfs(card);
1720 snd_card_free(card->snd_card); 1733 snd_card_free(card->snd_card);
1721 1734
@@ -2127,7 +2140,7 @@ EXPORT_SYMBOL_GPL(snd_soc_codec_set_pll);
2127/** 2140/**
2128 * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio. 2141 * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
2129 * @dai: DAI 2142 * @dai: DAI
2130 * @ratio Ratio of BCLK to Sample rate. 2143 * @ratio: Ratio of BCLK to Sample rate.
2131 * 2144 *
2132 * Configures the DAI for a preset BCLK to sample rate ratio. 2145 * Configures the DAI for a preset BCLK to sample rate ratio.
2133 */ 2146 */
@@ -2651,10 +2664,7 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
2651 component->probe = component->driver->probe; 2664 component->probe = component->driver->probe;
2652 component->remove = component->driver->remove; 2665 component->remove = component->driver->remove;
2653 2666
2654 if (!component->dapm_ptr) 2667 dapm = &component->dapm;
2655 component->dapm_ptr = &component->dapm;
2656
2657 dapm = component->dapm_ptr;
2658 dapm->dev = dev; 2668 dapm->dev = dev;
2659 dapm->component = component; 2669 dapm->component = component;
2660 dapm->bias_level = SND_SOC_BIAS_OFF; 2670 dapm->bias_level = SND_SOC_BIAS_OFF;
@@ -2798,6 +2808,7 @@ EXPORT_SYMBOL_GPL(snd_soc_register_component);
2798/** 2808/**
2799 * snd_soc_unregister_component - Unregister a component from the ASoC core 2809 * snd_soc_unregister_component - Unregister a component from the ASoC core
2800 * 2810 *
2811 * @dev: The device to unregister
2801 */ 2812 */
2802void snd_soc_unregister_component(struct device *dev) 2813void snd_soc_unregister_component(struct device *dev)
2803{ 2814{
@@ -2838,7 +2849,7 @@ static void snd_soc_platform_drv_remove(struct snd_soc_component *component)
2838 * snd_soc_add_platform - Add a platform to the ASoC core 2849 * snd_soc_add_platform - Add a platform to the ASoC core
2839 * @dev: The parent device for the platform 2850 * @dev: The parent device for the platform
2840 * @platform: The platform to add 2851 * @platform: The platform to add
2841 * @platform_driver: The driver for the platform 2852 * @platform_drv: The driver for the platform
2842 */ 2853 */
2843int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform, 2854int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
2844 const struct snd_soc_platform_driver *platform_drv) 2855 const struct snd_soc_platform_driver *platform_drv)
@@ -2877,7 +2888,8 @@ EXPORT_SYMBOL_GPL(snd_soc_add_platform);
2877/** 2888/**
2878 * snd_soc_register_platform - Register a platform with the ASoC core 2889 * snd_soc_register_platform - Register a platform with the ASoC core
2879 * 2890 *
2880 * @platform: platform to register 2891 * @dev: The device for the platform
2892 * @platform_drv: The driver for the platform
2881 */ 2893 */
2882int snd_soc_register_platform(struct device *dev, 2894int snd_soc_register_platform(struct device *dev,
2883 const struct snd_soc_platform_driver *platform_drv) 2895 const struct snd_soc_platform_driver *platform_drv)
@@ -2938,7 +2950,7 @@ EXPORT_SYMBOL_GPL(snd_soc_lookup_platform);
2938/** 2950/**
2939 * snd_soc_unregister_platform - Unregister a platform from the ASoC core 2951 * snd_soc_unregister_platform - Unregister a platform from the ASoC core
2940 * 2952 *
2941 * @platform: platform to unregister 2953 * @dev: platform to unregister
2942 */ 2954 */
2943void snd_soc_unregister_platform(struct device *dev) 2955void snd_soc_unregister_platform(struct device *dev)
2944{ 2956{
@@ -3029,13 +3041,17 @@ static int snd_soc_codec_set_bias_level(struct snd_soc_dapm_context *dapm,
3029/** 3041/**
3030 * snd_soc_register_codec - Register a codec with the ASoC core 3042 * snd_soc_register_codec - Register a codec with the ASoC core
3031 * 3043 *
3032 * @codec: codec to register 3044 * @dev: The parent device for this codec
3045 * @codec_drv: Codec driver
3046 * @dai_drv: The associated DAI driver
3047 * @num_dai: Number of DAIs
3033 */ 3048 */
3034int snd_soc_register_codec(struct device *dev, 3049int snd_soc_register_codec(struct device *dev,
3035 const struct snd_soc_codec_driver *codec_drv, 3050 const struct snd_soc_codec_driver *codec_drv,
3036 struct snd_soc_dai_driver *dai_drv, 3051 struct snd_soc_dai_driver *dai_drv,
3037 int num_dai) 3052 int num_dai)
3038{ 3053{
3054 struct snd_soc_dapm_context *dapm;
3039 struct snd_soc_codec *codec; 3055 struct snd_soc_codec *codec;
3040 struct snd_soc_dai *dai; 3056 struct snd_soc_dai *dai;
3041 int ret, i; 3057 int ret, i;
@@ -3046,7 +3062,6 @@ int snd_soc_register_codec(struct device *dev,
3046 if (codec == NULL) 3062 if (codec == NULL)
3047 return -ENOMEM; 3063 return -ENOMEM;
3048 3064
3049 codec->component.dapm_ptr = &codec->dapm;
3050 codec->component.codec = codec; 3065 codec->component.codec = codec;
3051 3066
3052 ret = snd_soc_component_initialize(&codec->component, 3067 ret = snd_soc_component_initialize(&codec->component,
@@ -3076,12 +3091,14 @@ int snd_soc_register_codec(struct device *dev,
3076 if (codec_drv->read) 3091 if (codec_drv->read)
3077 codec->component.read = snd_soc_codec_drv_read; 3092 codec->component.read = snd_soc_codec_drv_read;
3078 codec->component.ignore_pmdown_time = codec_drv->ignore_pmdown_time; 3093 codec->component.ignore_pmdown_time = codec_drv->ignore_pmdown_time;
3079 codec->dapm.idle_bias_off = codec_drv->idle_bias_off; 3094
3080 codec->dapm.suspend_bias_off = codec_drv->suspend_bias_off; 3095 dapm = snd_soc_codec_get_dapm(codec);
3096 dapm->idle_bias_off = codec_drv->idle_bias_off;
3097 dapm->suspend_bias_off = codec_drv->suspend_bias_off;
3081 if (codec_drv->seq_notifier) 3098 if (codec_drv->seq_notifier)
3082 codec->dapm.seq_notifier = codec_drv->seq_notifier; 3099 dapm->seq_notifier = codec_drv->seq_notifier;
3083 if (codec_drv->set_bias_level) 3100 if (codec_drv->set_bias_level)
3084 codec->dapm.set_bias_level = snd_soc_codec_set_bias_level; 3101 dapm->set_bias_level = snd_soc_codec_set_bias_level;
3085 codec->dev = dev; 3102 codec->dev = dev;
3086 codec->driver = codec_drv; 3103 codec->driver = codec_drv;
3087 codec->component.val_bytes = codec_drv->reg_word_size; 3104 codec->component.val_bytes = codec_drv->reg_word_size;
@@ -3128,7 +3145,7 @@ EXPORT_SYMBOL_GPL(snd_soc_register_codec);
3128/** 3145/**
3129 * snd_soc_unregister_codec - Unregister a codec from the ASoC core 3146 * snd_soc_unregister_codec - Unregister a codec from the ASoC core
3130 * 3147 *
3131 * @codec: codec to unregister 3148 * @dev: codec to unregister
3132 */ 3149 */
3133void snd_soc_unregister_codec(struct device *dev) 3150void snd_soc_unregister_codec(struct device *dev)
3134{ 3151{
@@ -3303,6 +3320,26 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
3303} 3320}
3304EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot); 3321EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
3305 3322
3323void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
3324 struct snd_soc_codec_conf *codec_conf,
3325 struct device_node *of_node,
3326 const char *propname)
3327{
3328 struct device_node *np = card->dev->of_node;
3329 const char *str;
3330 int ret;
3331
3332 ret = of_property_read_string(np, propname, &str);
3333 if (ret < 0) {
3334 /* no prefix is not error */
3335 return;
3336 }
3337
3338 codec_conf->of_node = of_node;
3339 codec_conf->name_prefix = str;
3340}
3341EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
3342
3306int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, 3343int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
3307 const char *propname) 3344 const char *propname)
3308{ 3345{
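
In soc-core.c the component->probed flag is gone: a component counts as bound once component->card is set, probing is a no-op when the same card probes it again, and binding to a second card is rejected with -ENODEV. A simplified standalone sketch of that check (the struct names are stand-ins for the ASoC structures):

#include <stdio.h>
#include <stddef.h>

struct card { const char *name; };
struct component { const char *name; struct card *card; };

static int probe_component(struct card *card, struct component *c)
{
	if (c->card) {
		if (c->card != card) {
			printf("%s: already bound to \"%s\", refusing \"%s\"\n",
			       c->name, c->card->name, card->name);
			return -1;	/* -ENODEV in the real code */
		}
		return 0;		/* already probed for this card */
	}
	c->card = card;
	printf("%s: bound to \"%s\"\n", c->name, card->name);
	return 0;
}

int main(void)
{
	struct card a = { "card-a" }, b = { "card-b" };
	struct component codec = { "codec", NULL };

	probe_component(&a, &codec);
	probe_component(&a, &codec);	/* second probe by the same card is fine */
	probe_component(&b, &codec);	/* a different card is rejected */
	return 0;
}
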
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index aa327c92480c..f4bf21a5539b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -47,6 +47,13 @@
47 47
48#define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++; 48#define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;
49 49
50#define SND_SOC_DAPM_DIR_REVERSE(x) ((x == SND_SOC_DAPM_DIR_IN) ? \
51 SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN)
52
53#define snd_soc_dapm_for_each_direction(dir) \
54 for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
55 (dir)++)
56
50static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm, 57static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
51 struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink, 58 struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
52 const char *control, 59 const char *control,
@@ -167,45 +174,59 @@ static void dapm_mark_dirty(struct snd_soc_dapm_widget *w, const char *reason)
167} 174}
168 175
169/* 176/*
170 * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input 177 * Common implementation for dapm_widget_invalidate_input_paths() and
171 * paths 178 * dapm_widget_invalidate_output_paths(). The function is inlined since the
172 * @w: The widget for which to invalidate the cached number of input paths 179 * combined size of the two specialized functions is only marginally larger than
173 * 180 * the size of the generic function and at the same time the fast path of the
174 * The function resets the cached number of inputs for the specified widget and 181 * specialized functions is significantly smaller than the generic function.
175 * all widgets that can be reached via outgoing paths from the widget.
176 *
177 * This function must be called if the number of input paths for a widget might
178 * have changed. E.g. if the source state of a widget changes or a path is added
179 * or activated with the widget as the sink.
180 */ 182 */
181static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w) 183static __always_inline void dapm_widget_invalidate_paths(
184 struct snd_soc_dapm_widget *w, enum snd_soc_dapm_direction dir)
182{ 185{
183 struct snd_soc_dapm_widget *sink; 186 enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
187 struct snd_soc_dapm_widget *node;
184 struct snd_soc_dapm_path *p; 188 struct snd_soc_dapm_path *p;
185 LIST_HEAD(list); 189 LIST_HEAD(list);
186 190
187 dapm_assert_locked(w->dapm); 191 dapm_assert_locked(w->dapm);
188 192
189 if (w->inputs == -1) 193 if (w->endpoints[dir] == -1)
190 return; 194 return;
191 195
192 w->inputs = -1;
193 list_add_tail(&w->work_list, &list); 196 list_add_tail(&w->work_list, &list);
197 w->endpoints[dir] = -1;
194 198
195 list_for_each_entry(w, &list, work_list) { 199 list_for_each_entry(w, &list, work_list) {
196 list_for_each_entry(p, &w->sinks, list_source) { 200 snd_soc_dapm_widget_for_each_path(w, dir, p) {
197 if (p->is_supply || p->weak || !p->connect) 201 if (p->is_supply || p->weak || !p->connect)
198 continue; 202 continue;
199 sink = p->sink; 203 node = p->node[rdir];
200 if (sink->inputs != -1) { 204 if (node->endpoints[dir] != -1) {
201 sink->inputs = -1; 205 node->endpoints[dir] = -1;
202 list_add_tail(&sink->work_list, &list); 206 list_add_tail(&node->work_list, &list);
203 } 207 }
204 } 208 }
205 } 209 }
206} 210}
207 211
208/* 212/*
213 * dapm_widget_invalidate_input_paths() - Invalidate the cached number of
214 * input paths
215 * @w: The widget for which to invalidate the cached number of input paths
216 *
217 * Resets the cached number of inputs for the specified widget and all widgets
218 * that can be reached via outgoing paths from the widget.
219 *
220 * This function must be called if the number of input paths for a widget might
221 * have changed. E.g. if the source state of a widget changes or a path is added
222 * or activated with the widget as the sink.
223 */
224static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
225{
226 dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_IN);
227}
228
229/*
209 * dapm_widget_invalidate_output_paths() - Invalidate the cached number of 230 * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
210 * output paths 231 * output paths
211 * @w: The widget for which to invalidate the cached number of output paths 232 * @w: The widget for which to invalidate the cached number of output paths
@@ -219,29 +240,7 @@ static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
219 */ 240 */
220static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w) 241static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
221{ 242{
222 struct snd_soc_dapm_widget *source; 243 dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_OUT);
223 struct snd_soc_dapm_path *p;
224 LIST_HEAD(list);
225
226 dapm_assert_locked(w->dapm);
227
228 if (w->outputs == -1)
229 return;
230
231 w->outputs = -1;
232 list_add_tail(&w->work_list, &list);
233
234 list_for_each_entry(w, &list, work_list) {
235 list_for_each_entry(p, &w->sources, list_sink) {
236 if (p->is_supply || p->weak || !p->connect)
237 continue;
238 source = p->source;
239 if (source->outputs != -1) {
240 source->outputs = -1;
241 list_add_tail(&source->work_list, &list);
242 }
243 }
244 }
245} 244}
246 245
247/* 246/*
@@ -270,9 +269,9 @@ static void dapm_path_invalidate(struct snd_soc_dapm_path *p)
270 * endpoints is either connected or disconnected that sum won't change, 269 * endpoints is either connected or disconnected that sum won't change,
271 * so there is no need to re-check the path. 270 * so there is no need to re-check the path.
272 */ 271 */
273 if (p->source->inputs != 0) 272 if (p->source->endpoints[SND_SOC_DAPM_DIR_IN] != 0)
274 dapm_widget_invalidate_input_paths(p->sink); 273 dapm_widget_invalidate_input_paths(p->sink);
275 if (p->sink->outputs != 0) 274 if (p->sink->endpoints[SND_SOC_DAPM_DIR_OUT] != 0)
276 dapm_widget_invalidate_output_paths(p->source); 275 dapm_widget_invalidate_output_paths(p->source);
277} 276}
278 277
@@ -283,11 +282,11 @@ void dapm_mark_endpoints_dirty(struct snd_soc_card *card)
283 mutex_lock(&card->dapm_mutex); 282 mutex_lock(&card->dapm_mutex);
284 283
285 list_for_each_entry(w, &card->widgets, list) { 284 list_for_each_entry(w, &card->widgets, list) {
286 if (w->is_sink || w->is_source) { 285 if (w->is_ep) {
287 dapm_mark_dirty(w, "Rechecking endpoints"); 286 dapm_mark_dirty(w, "Rechecking endpoints");
288 if (w->is_sink) 287 if (w->is_ep & SND_SOC_DAPM_EP_SINK)
289 dapm_widget_invalidate_output_paths(w); 288 dapm_widget_invalidate_output_paths(w);
290 if (w->is_source) 289 if (w->is_ep & SND_SOC_DAPM_EP_SOURCE)
291 dapm_widget_invalidate_input_paths(w); 290 dapm_widget_invalidate_input_paths(w);
292 } 291 }
293 } 292 }
@@ -358,9 +357,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
358 data->widget = 357 data->widget =
359 snd_soc_dapm_new_control_unlocked(widget->dapm, 358 snd_soc_dapm_new_control_unlocked(widget->dapm,
360 &template); 359 &template);
360 kfree(name);
361 if (!data->widget) { 361 if (!data->widget) {
362 ret = -ENOMEM; 362 ret = -ENOMEM;
363 goto err_name; 363 goto err_data;
364 } 364 }
365 } 365 }
366 break; 366 break;
@@ -389,11 +389,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
389 389
390 data->value = template.on_val; 390 data->value = template.on_val;
391 391
392 data->widget = snd_soc_dapm_new_control(widget->dapm, 392 data->widget = snd_soc_dapm_new_control_unlocked(
393 &template); 393 widget->dapm, &template);
394 kfree(name);
394 if (!data->widget) { 395 if (!data->widget) {
395 ret = -ENOMEM; 396 ret = -ENOMEM;
396 goto err_name; 397 goto err_data;
397 } 398 }
398 399
399 snd_soc_dapm_add_path(widget->dapm, data->widget, 400 snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +409,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
408 409
409 return 0; 410 return 0;
410 411
411err_name:
412 kfree(name);
413err_data: 412err_data:
414 kfree(data); 413 kfree(data);
415 return ret; 414 return ret;
@@ -418,8 +417,6 @@ err_data:
418static void dapm_kcontrol_free(struct snd_kcontrol *kctl) 417static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
419{ 418{
420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); 419 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
421 if (data->widget)
422 kfree(data->widget->name);
423 kfree(data->wlist); 420 kfree(data->wlist);
424 kfree(data); 421 kfree(data);
425} 422}
@@ -896,7 +893,7 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
896 /* add kcontrol */ 893 /* add kcontrol */
897 for (i = 0; i < w->num_kcontrols; i++) { 894 for (i = 0; i < w->num_kcontrols; i++) {
898 /* match name */ 895 /* match name */
899 list_for_each_entry(path, &w->sources, list_sink) { 896 snd_soc_dapm_widget_for_each_source_path(w, path) {
900 /* mixer/mux paths name must match control name */ 897 /* mixer/mux paths name must match control name */
901 if (path->name != (char *)w->kcontrol_news[i].name) 898 if (path->name != (char *)w->kcontrol_news[i].name)
902 continue; 899 continue;
@@ -925,18 +922,18 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
925static int dapm_new_mux(struct snd_soc_dapm_widget *w) 922static int dapm_new_mux(struct snd_soc_dapm_widget *w)
926{ 923{
927 struct snd_soc_dapm_context *dapm = w->dapm; 924 struct snd_soc_dapm_context *dapm = w->dapm;
925 enum snd_soc_dapm_direction dir;
928 struct snd_soc_dapm_path *path; 926 struct snd_soc_dapm_path *path;
929 struct list_head *paths;
930 const char *type; 927 const char *type;
931 int ret; 928 int ret;
932 929
933 switch (w->id) { 930 switch (w->id) {
934 case snd_soc_dapm_mux: 931 case snd_soc_dapm_mux:
935 paths = &w->sources; 932 dir = SND_SOC_DAPM_DIR_OUT;
936 type = "mux"; 933 type = "mux";
937 break; 934 break;
938 case snd_soc_dapm_demux: 935 case snd_soc_dapm_demux:
939 paths = &w->sinks; 936 dir = SND_SOC_DAPM_DIR_IN;
940 type = "demux"; 937 type = "demux";
941 break; 938 break;
942 default: 939 default:
@@ -950,7 +947,7 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
950 return -EINVAL; 947 return -EINVAL;
951 } 948 }
952 949
953 if (list_empty(paths)) { 950 if (list_empty(&w->edges[dir])) {
954 dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name); 951 dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name);
955 return -EINVAL; 952 return -EINVAL;
956 } 953 }
@@ -959,16 +956,9 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
959 if (ret < 0) 956 if (ret < 0)
960 return ret; 957 return ret;
961 958
962 if (w->id == snd_soc_dapm_mux) { 959 snd_soc_dapm_widget_for_each_path(w, dir, path) {
963 list_for_each_entry(path, &w->sources, list_sink) { 960 if (path->name)
964 if (path->name) 961 dapm_kcontrol_add_path(w->kcontrols[0], path);
965 dapm_kcontrol_add_path(w->kcontrols[0], path);
966 }
967 } else {
968 list_for_each_entry(path, &w->sinks, list_source) {
969 if (path->name)
970 dapm_kcontrol_add_path(w->kcontrols[0], path);
971 }
972 } 962 }
973 963
974 return 0; 964 return 0;
@@ -1034,66 +1024,59 @@ static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
1034 } 1024 }
1035} 1025}
1036 1026
1037/* add widget to list if it's not already in the list */ 1027static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
1038static int dapm_list_add_widget(struct snd_soc_dapm_widget_list **list, 1028 struct list_head *widgets)
1039 struct snd_soc_dapm_widget *w)
1040{ 1029{
1041 struct snd_soc_dapm_widget_list *wlist; 1030 struct snd_soc_dapm_widget *w;
1042 int wlistsize, wlistentries, i; 1031 struct list_head *it;
1043 1032 unsigned int size = 0;
1044 if (*list == NULL) 1033 unsigned int i = 0;
1045 return -EINVAL;
1046
1047 wlist = *list;
1048 1034
1049 /* is this widget already in the list */ 1035 list_for_each(it, widgets)
1050 for (i = 0; i < wlist->num_widgets; i++) { 1036 size++;
1051 if (wlist->widgets[i] == w)
1052 return 0;
1053 }
1054 1037
1055 /* allocate some new space */ 1038 *list = kzalloc(sizeof(**list) + size * sizeof(*w), GFP_KERNEL);
1056 wlistentries = wlist->num_widgets + 1; 1039 if (*list == NULL)
1057 wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
1058 wlistentries * sizeof(struct snd_soc_dapm_widget *);
1059 *list = krealloc(wlist, wlistsize, GFP_KERNEL);
1060 if (*list == NULL) {
1061 dev_err(w->dapm->dev, "ASoC: can't allocate widget list for %s\n",
1062 w->name);
1063 return -ENOMEM; 1040 return -ENOMEM;
1064 }
1065 wlist = *list;
1066 1041
1067 /* insert the widget */ 1042 list_for_each_entry(w, widgets, work_list)
1068 dev_dbg(w->dapm->dev, "ASoC: added %s in widget list pos %d\n", 1043 (*list)->widgets[i++] = w;
1069 w->name, wlist->num_widgets);
1070 1044
1071 wlist->widgets[wlist->num_widgets] = w; 1045 (*list)->num_widgets = i;
1072 wlist->num_widgets++; 1046
1073 return 1; 1047 return 0;
1074} 1048}
1075 1049
1076/* 1050/*
1077 * Recursively check for a completed path to an active or physically connected 1051 * Common implementation for is_connected_output_ep() and
1078 * output widget. Returns number of complete paths. 1052 * is_connected_input_ep(). The function is inlined since the combined size of
1053 * the two specialized functions is only marginally larger then the size of the
1054 * generic function and at the same time the fast path of the specialized
1055 * functions is significantly smaller than the generic function.
1079 */ 1056 */
1080static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, 1057static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
1081 struct snd_soc_dapm_widget_list **list) 1058 struct list_head *list, enum snd_soc_dapm_direction dir,
1059 int (*fn)(struct snd_soc_dapm_widget *, struct list_head *))
1082{ 1060{
1061 enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
1083 struct snd_soc_dapm_path *path; 1062 struct snd_soc_dapm_path *path;
1084 int con = 0; 1063 int con = 0;
1085 1064
1086 if (widget->outputs >= 0) 1065 if (widget->endpoints[dir] >= 0)
1087 return widget->outputs; 1066 return widget->endpoints[dir];
1088 1067
1089 DAPM_UPDATE_STAT(widget, path_checks); 1068 DAPM_UPDATE_STAT(widget, path_checks);
1090 1069
1091 if (widget->is_sink && widget->connected) { 1070 /* do we need to add this widget to the list ? */
1092 widget->outputs = snd_soc_dapm_suspend_check(widget); 1071 if (list)
1093 return widget->outputs; 1072 list_add_tail(&widget->work_list, list);
1073
1074 if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
1075 widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget);
1076 return widget->endpoints[dir];
1094 } 1077 }
1095 1078
1096 list_for_each_entry(path, &widget->sinks, list_source) { 1079 snd_soc_dapm_widget_for_each_path(widget, rdir, path) {
1097 DAPM_UPDATE_STAT(widget, neighbour_checks); 1080 DAPM_UPDATE_STAT(widget, neighbour_checks);
1098 1081
1099 if (path->weak || path->is_supply) 1082 if (path->weak || path->is_supply)
@@ -1102,91 +1085,40 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
1102 if (path->walking) 1085 if (path->walking)
1103 return 1; 1086 return 1;
1104 1087
1105 trace_snd_soc_dapm_output_path(widget, path); 1088 trace_snd_soc_dapm_path(widget, dir, path);
1106 1089
1107 if (path->connect) { 1090 if (path->connect) {
1108 path->walking = 1; 1091 path->walking = 1;
1109 1092 con += fn(path->node[dir], list);
1110 /* do we need to add this widget to the list ? */
1111 if (list) {
1112 int err;
1113 err = dapm_list_add_widget(list, path->sink);
1114 if (err < 0) {
1115 dev_err(widget->dapm->dev,
1116 "ASoC: could not add widget %s\n",
1117 widget->name);
1118 path->walking = 0;
1119 return con;
1120 }
1121 }
1122
1123 con += is_connected_output_ep(path->sink, list);
1124
1125 path->walking = 0; 1093 path->walking = 0;
1126 } 1094 }
1127 } 1095 }
1128 1096
1129 widget->outputs = con; 1097 widget->endpoints[dir] = con;
1130 1098
1131 return con; 1099 return con;
1132} 1100}
1133 1101
1134/* 1102/*
1135 * Recursively check for a completed path to an active or physically connected 1103 * Recursively check for a completed path to an active or physically connected
1104 * output widget. Returns number of complete paths.
1105 */
1106static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
1107 struct list_head *list)
1108{
1109 return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT,
1110 is_connected_output_ep);
1111}
1112
1113/*
1114 * Recursively check for a completed path to an active or physically connected
1136 * input widget. Returns number of complete paths. 1115 * input widget. Returns number of complete paths.
1137 */ 1116 */
1138static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, 1117static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
1139 struct snd_soc_dapm_widget_list **list) 1118 struct list_head *list)
1140{ 1119{
1141 struct snd_soc_dapm_path *path; 1120 return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN,
1142 int con = 0; 1121 is_connected_input_ep);
1143
1144 if (widget->inputs >= 0)
1145 return widget->inputs;
1146
1147 DAPM_UPDATE_STAT(widget, path_checks);
1148
1149 if (widget->is_source && widget->connected) {
1150 widget->inputs = snd_soc_dapm_suspend_check(widget);
1151 return widget->inputs;
1152 }
1153
1154 list_for_each_entry(path, &widget->sources, list_sink) {
1155 DAPM_UPDATE_STAT(widget, neighbour_checks);
1156
1157 if (path->weak || path->is_supply)
1158 continue;
1159
1160 if (path->walking)
1161 return 1;
1162
1163 trace_snd_soc_dapm_input_path(widget, path);
1164
1165 if (path->connect) {
1166 path->walking = 1;
1167
1168 /* do we need to add this widget to the list ? */
1169 if (list) {
1170 int err;
1171 err = dapm_list_add_widget(list, path->source);
1172 if (err < 0) {
1173 dev_err(widget->dapm->dev,
1174 "ASoC: could not add widget %s\n",
1175 widget->name);
1176 path->walking = 0;
1177 return con;
1178 }
1179 }
1180
1181 con += is_connected_input_ep(path->source, list);
1182
1183 path->walking = 0;
1184 }
1185 }
1186
1187 widget->inputs = con;
1188
1189 return con;
1190} 1122}
1191 1123
1192/** 1124/**
@@ -1206,7 +1138,9 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
1206{ 1138{
1207 struct snd_soc_card *card = dai->component->card; 1139 struct snd_soc_card *card = dai->component->card;
1208 struct snd_soc_dapm_widget *w; 1140 struct snd_soc_dapm_widget *w;
1141 LIST_HEAD(widgets);
1209 int paths; 1142 int paths;
1143 int ret;
1210 1144
1211 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 1145 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
1212 1146
@@ -1215,14 +1149,21 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
1215 * to reset the cached number of inputs and outputs. 1149 * to reset the cached number of inputs and outputs.
1216 */ 1150 */
1217 list_for_each_entry(w, &card->widgets, list) { 1151 list_for_each_entry(w, &card->widgets, list) {
1218 w->inputs = -1; 1152 w->endpoints[SND_SOC_DAPM_DIR_IN] = -1;
1219 w->outputs = -1; 1153 w->endpoints[SND_SOC_DAPM_DIR_OUT] = -1;
1220 } 1154 }
1221 1155
1222 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 1156 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
1223 paths = is_connected_output_ep(dai->playback_widget, list); 1157 paths = is_connected_output_ep(dai->playback_widget, &widgets);
1224 else 1158 else
1225 paths = is_connected_input_ep(dai->capture_widget, list); 1159 paths = is_connected_input_ep(dai->capture_widget, &widgets);
1160
1161 /* Drop starting point */
1162 list_del(widgets.next);
1163
1164 ret = dapm_widget_list_create(list, &widgets);
1165 if (ret)
1166 paths = ret;
1226 1167
1227 trace_snd_soc_dapm_connected(paths, stream); 1168 trace_snd_soc_dapm_connected(paths, stream);
1228 mutex_unlock(&card->dapm_mutex); 1169 mutex_unlock(&card->dapm_mutex);
@@ -1323,7 +1264,7 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
1323 DAPM_UPDATE_STAT(w, power_checks); 1264 DAPM_UPDATE_STAT(w, power_checks);
1324 1265
1325 /* Check if one of our outputs is connected */ 1266 /* Check if one of our outputs is connected */
1326 list_for_each_entry(path, &w->sinks, list_source) { 1267 snd_soc_dapm_widget_for_each_sink_path(w, path) {
1327 DAPM_UPDATE_STAT(w, neighbour_checks); 1268 DAPM_UPDATE_STAT(w, neighbour_checks);
1328 1269
1329 if (path->weak) 1270 if (path->weak)
@@ -1747,12 +1688,12 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
1747 /* If we changed our power state perhaps our neighbours changed 1688 /* If we changed our power state perhaps our neighbours changed
1748 * also. 1689 * also.
1749 */ 1690 */
1750 list_for_each_entry(path, &w->sources, list_sink) 1691 snd_soc_dapm_widget_for_each_source_path(w, path)
1751 dapm_widget_set_peer_power(path->source, power, path->connect); 1692 dapm_widget_set_peer_power(path->source, power, path->connect);
1752 1693
1753 /* Supplies can't affect their outputs, only their inputs */ 1694 /* Supplies can't affect their outputs, only their inputs */
1754 if (!w->is_supply) { 1695 if (!w->is_supply) {
1755 list_for_each_entry(path, &w->sinks, list_source) 1696 snd_soc_dapm_widget_for_each_sink_path(w, path)
1756 dapm_widget_set_peer_power(path->sink, power, 1697 dapm_widget_set_peer_power(path->sink, power,
1757 path->connect); 1698 path->connect);
1758 } 1699 }
@@ -1952,6 +1893,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1952 size_t count, loff_t *ppos) 1893 size_t count, loff_t *ppos)
1953{ 1894{
1954 struct snd_soc_dapm_widget *w = file->private_data; 1895 struct snd_soc_dapm_widget *w = file->private_data;
1896 struct snd_soc_card *card = w->dapm->card;
1897 enum snd_soc_dapm_direction dir, rdir;
1955 char *buf; 1898 char *buf;
1956 int in, out; 1899 int in, out;
1957 ssize_t ret; 1900 ssize_t ret;
@@ -1961,6 +1904,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1961 if (!buf) 1904 if (!buf)
1962 return -ENOMEM; 1905 return -ENOMEM;
1963 1906
1907 mutex_lock(&card->dapm_mutex);
1908
1964 /* Supply widgets are not handled by is_connected_{input,output}_ep() */ 1909 /* Supply widgets are not handled by is_connected_{input,output}_ep() */
1965 if (w->is_supply) { 1910 if (w->is_supply) {
1966 in = 0; 1911 in = 0;
@@ -1986,27 +1931,25 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1986 w->sname, 1931 w->sname,
1987 w->active ? "active" : "inactive"); 1932 w->active ? "active" : "inactive");
1988 1933
1989 list_for_each_entry(p, &w->sources, list_sink) { 1934 snd_soc_dapm_for_each_direction(dir) {
1990 if (p->connected && !p->connected(w, p->source)) 1935 rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
1991 continue; 1936 snd_soc_dapm_widget_for_each_path(w, dir, p) {
1937 if (p->connected && !p->connected(w, p->node[rdir]))
1938 continue;
1992 1939
1993 if (p->connect) 1940 if (!p->connect)
1994 ret += snprintf(buf + ret, PAGE_SIZE - ret, 1941 continue;
1995 " in \"%s\" \"%s\"\n",
1996 p->name ? p->name : "static",
1997 p->source->name);
1998 }
1999 list_for_each_entry(p, &w->sinks, list_source) {
2000 if (p->connected && !p->connected(w, p->sink))
2001 continue;
2002 1942
2003 if (p->connect)
2004 ret += snprintf(buf + ret, PAGE_SIZE - ret, 1943 ret += snprintf(buf + ret, PAGE_SIZE - ret,
2005 " out \"%s\" \"%s\"\n", 1944 " %s \"%s\" \"%s\"\n",
1945 (rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
2006 p->name ? p->name : "static", 1946 p->name ? p->name : "static",
2007 p->sink->name); 1947 p->node[rdir]->name);
1948 }
2008 } 1949 }
2009 1950
1951 mutex_unlock(&card->dapm_mutex);
1952
2010 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 1953 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2011 1954
2012 kfree(buf); 1955 kfree(buf);
@@ -2220,14 +2163,16 @@ int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
2220} 2163}
2221EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power); 2164EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
2222 2165
2223static ssize_t dapm_widget_show_codec(struct snd_soc_codec *codec, char *buf) 2166static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
2167 char *buf)
2224{ 2168{
2169 struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
2225 struct snd_soc_dapm_widget *w; 2170 struct snd_soc_dapm_widget *w;
2226 int count = 0; 2171 int count = 0;
2227 char *state = "not set"; 2172 char *state = "not set";
2228 2173
2229 list_for_each_entry(w, &codec->component.card->widgets, list) { 2174 list_for_each_entry(w, &cmpnt->card->widgets, list) {
2230 if (w->dapm != &codec->dapm) 2175 if (w->dapm != dapm)
2231 continue; 2176 continue;
2232 2177
2233 /* only display widgets that burn power */ 2178 /* only display widgets that burn power */
@@ -2255,7 +2200,7 @@ static ssize_t dapm_widget_show_codec(struct snd_soc_codec *codec, char *buf)
2255 } 2200 }
2256 } 2201 }
2257 2202
2258 switch (codec->dapm.bias_level) { 2203 switch (snd_soc_dapm_get_bias_level(dapm)) {
2259 case SND_SOC_BIAS_ON: 2204 case SND_SOC_BIAS_ON:
2260 state = "On"; 2205 state = "On";
2261 break; 2206 break;
@@ -2281,11 +2226,16 @@ static ssize_t dapm_widget_show(struct device *dev,
2281 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev); 2226 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
2282 int i, count = 0; 2227 int i, count = 0;
2283 2228
2229 mutex_lock(&rtd->card->dapm_mutex);
2230
2284 for (i = 0; i < rtd->num_codecs; i++) { 2231 for (i = 0; i < rtd->num_codecs; i++) {
2285 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec; 2232 struct snd_soc_component *cmpnt = rtd->codec_dais[i]->component;
2286 count += dapm_widget_show_codec(codec, buf + count); 2233
2234 count += dapm_widget_show_component(cmpnt, buf + count);
2287 } 2235 }
2288 2236
2237 mutex_unlock(&rtd->card->dapm_mutex);
2238
2289 return count; 2239 return count;
2290} 2240}
2291 2241
@@ -2298,37 +2248,43 @@ struct attribute *soc_dapm_dev_attrs[] = {
2298 2248
2299static void dapm_free_path(struct snd_soc_dapm_path *path) 2249static void dapm_free_path(struct snd_soc_dapm_path *path)
2300{ 2250{
2301 list_del(&path->list_sink); 2251 list_del(&path->list_node[SND_SOC_DAPM_DIR_IN]);
2302 list_del(&path->list_source); 2252 list_del(&path->list_node[SND_SOC_DAPM_DIR_OUT]);
2303 list_del(&path->list_kcontrol); 2253 list_del(&path->list_kcontrol);
2304 list_del(&path->list); 2254 list_del(&path->list);
2305 kfree(path); 2255 kfree(path);
2306} 2256}
2307 2257
2258void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
2259{
2260 struct snd_soc_dapm_path *p, *next_p;
2261 enum snd_soc_dapm_direction dir;
2262
2263 list_del(&w->list);
2264 /*
2265 * remove source and sink paths associated to this widget.
2266 * While removing the path, remove reference to it from both
2267 * source and sink widgets so that path is removed only once.
2268 */
2269 snd_soc_dapm_for_each_direction(dir) {
2270 snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
2271 dapm_free_path(p);
2272 }
2273
2274 kfree(w->kcontrols);
2275 kfree_const(w->name);
2276 kfree(w);
2277}
2278
2308/* free all dapm widgets and resources */ 2279/* free all dapm widgets and resources */
2309static void dapm_free_widgets(struct snd_soc_dapm_context *dapm) 2280static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
2310{ 2281{
2311 struct snd_soc_dapm_widget *w, *next_w; 2282 struct snd_soc_dapm_widget *w, *next_w;
2312 struct snd_soc_dapm_path *p, *next_p;
2313 2283
2314 list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) { 2284 list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
2315 if (w->dapm != dapm) 2285 if (w->dapm != dapm)
2316 continue; 2286 continue;
2317 list_del(&w->list); 2287 snd_soc_dapm_free_widget(w);
2318 /*
2319 * remove source and sink paths associated to this widget.
2320 * While removing the path, remove reference to it from both
2321 * source and sink widgets so that path is removed only once.
2322 */
2323 list_for_each_entry_safe(p, next_p, &w->sources, list_sink)
2324 dapm_free_path(p);
2325
2326 list_for_each_entry_safe(p, next_p, &w->sinks, list_source)
2327 dapm_free_path(p);
2328
2329 kfree(w->kcontrols);
2330 kfree(w->name);
2331 kfree(w);
2332 } 2288 }
2333} 2289}
2334 2290
@@ -2434,20 +2390,22 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
2434 */ 2390 */
2435static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w) 2391static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
2436{ 2392{
2393 enum snd_soc_dapm_direction dir;
2437 struct snd_soc_dapm_path *p; 2394 struct snd_soc_dapm_path *p;
2395 unsigned int ep;
2438 2396
2439 switch (w->id) { 2397 switch (w->id) {
2440 case snd_soc_dapm_input: 2398 case snd_soc_dapm_input:
2441 /* On a fully routed card an input is never a source */ 2399 /* On a fully routed card an input is never a source */
2442 if (w->dapm->card->fully_routed) 2400 if (w->dapm->card->fully_routed)
2443 break; 2401 return;
2444 w->is_source = 1; 2402 ep = SND_SOC_DAPM_EP_SOURCE;
2445 list_for_each_entry(p, &w->sources, list_sink) { 2403 snd_soc_dapm_widget_for_each_source_path(w, p) {
2446 if (p->source->id == snd_soc_dapm_micbias || 2404 if (p->source->id == snd_soc_dapm_micbias ||
2447 p->source->id == snd_soc_dapm_mic || 2405 p->source->id == snd_soc_dapm_mic ||
2448 p->source->id == snd_soc_dapm_line || 2406 p->source->id == snd_soc_dapm_line ||
2449 p->source->id == snd_soc_dapm_output) { 2407 p->source->id == snd_soc_dapm_output) {
2450 w->is_source = 0; 2408 ep = 0;
2451 break; 2409 break;
2452 } 2410 }
2453 } 2411 }
@@ -2455,25 +2413,30 @@ static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
2455 case snd_soc_dapm_output: 2413 case snd_soc_dapm_output:
2456 /* On a fully routed card an output is never a sink */ 2414 /* On a fully routed card an output is never a sink */
2457 if (w->dapm->card->fully_routed) 2415 if (w->dapm->card->fully_routed)
2458 break; 2416 return;
2459 w->is_sink = 1; 2417 ep = SND_SOC_DAPM_EP_SINK;
2460 list_for_each_entry(p, &w->sinks, list_source) { 2418 snd_soc_dapm_widget_for_each_sink_path(w, p) {
2461 if (p->sink->id == snd_soc_dapm_spk || 2419 if (p->sink->id == snd_soc_dapm_spk ||
2462 p->sink->id == snd_soc_dapm_hp || 2420 p->sink->id == snd_soc_dapm_hp ||
2463 p->sink->id == snd_soc_dapm_line || 2421 p->sink->id == snd_soc_dapm_line ||
2464 p->sink->id == snd_soc_dapm_input) { 2422 p->sink->id == snd_soc_dapm_input) {
2465 w->is_sink = 0; 2423 ep = 0;
2466 break; 2424 break;
2467 } 2425 }
2468 } 2426 }
2469 break; 2427 break;
2470 case snd_soc_dapm_line: 2428 case snd_soc_dapm_line:
2471 w->is_sink = !list_empty(&w->sources); 2429 ep = 0;
2472 w->is_source = !list_empty(&w->sinks); 2430 snd_soc_dapm_for_each_direction(dir) {
2431 if (!list_empty(&w->edges[dir]))
2432 ep |= SND_SOC_DAPM_DIR_TO_EP(dir);
2433 }
2473 break; 2434 break;
2474 default: 2435 default:
2475 break; 2436 return;
2476 } 2437 }
2438
2439 w->is_ep = ep;
2477} 2440}
2478 2441
2479static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm, 2442static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm,
@@ -2526,6 +2489,8 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
2526 int (*connected)(struct snd_soc_dapm_widget *source, 2489 int (*connected)(struct snd_soc_dapm_widget *source,
2527 struct snd_soc_dapm_widget *sink)) 2490 struct snd_soc_dapm_widget *sink))
2528{ 2491{
2492 struct snd_soc_dapm_widget *widgets[2];
2493 enum snd_soc_dapm_direction dir;
2529 struct snd_soc_dapm_path *path; 2494 struct snd_soc_dapm_path *path;
2530 int ret; 2495 int ret;
2531 2496
@@ -2558,13 +2523,14 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
2558 if (!path) 2523 if (!path)
2559 return -ENOMEM; 2524 return -ENOMEM;
2560 2525
2561 path->source = wsource; 2526 path->node[SND_SOC_DAPM_DIR_IN] = wsource;
2562 path->sink = wsink; 2527 path->node[SND_SOC_DAPM_DIR_OUT] = wsink;
2528 widgets[SND_SOC_DAPM_DIR_IN] = wsource;
2529 widgets[SND_SOC_DAPM_DIR_OUT] = wsink;
2530
2563 path->connected = connected; 2531 path->connected = connected;
2564 INIT_LIST_HEAD(&path->list); 2532 INIT_LIST_HEAD(&path->list);
2565 INIT_LIST_HEAD(&path->list_kcontrol); 2533 INIT_LIST_HEAD(&path->list_kcontrol);
2566 INIT_LIST_HEAD(&path->list_source);
2567 INIT_LIST_HEAD(&path->list_sink);
2568 2534
2569 if (wsource->is_supply || wsink->is_supply) 2535 if (wsource->is_supply || wsink->is_supply)
2570 path->is_supply = 1; 2536 path->is_supply = 1;
@@ -2602,14 +2568,13 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
2602 } 2568 }
2603 2569
2604 list_add(&path->list, &dapm->card->paths); 2570 list_add(&path->list, &dapm->card->paths);
2605 list_add(&path->list_sink, &wsink->sources); 2571 snd_soc_dapm_for_each_direction(dir)
2606 list_add(&path->list_source, &wsource->sinks); 2572 list_add(&path->list_node[dir], &widgets[dir]->edges[dir]);
2607
2608 dapm_update_widget_flags(wsource);
2609 dapm_update_widget_flags(wsink);
2610 2573
2611 dapm_mark_dirty(wsource, "Route added"); 2574 snd_soc_dapm_for_each_direction(dir) {
2612 dapm_mark_dirty(wsink, "Route added"); 2575 dapm_update_widget_flags(widgets[dir]);
2576 dapm_mark_dirty(widgets[dir], "Route added");
2577 }
2613 2578
2614 if (dapm->card->instantiated && path->connect) 2579 if (dapm->card->instantiated && path->connect)
2615 dapm_path_invalidate(path); 2580 dapm_path_invalidate(path);
@@ -2857,7 +2822,7 @@ static int snd_soc_dapm_weak_route(struct snd_soc_dapm_context *dapm,
2857 dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n", 2822 dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n",
2858 route->source, route->sink); 2823 route->source, route->sink);
2859 2824
2860 list_for_each_entry(path, &source->sinks, list_source) { 2825 snd_soc_dapm_widget_for_each_sink_path(source, path) {
2861 if (path->sink == sink) { 2826 if (path->sink == sink) {
2862 path->weak = 1; 2827 path->weak = 1;
2863 count++; 2828 count++;
@@ -2911,7 +2876,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_weak_routes);
2911 2876
2912/** 2877/**
2913 * snd_soc_dapm_new_widgets - add new dapm widgets 2878 * snd_soc_dapm_new_widgets - add new dapm widgets
2914 * @dapm: DAPM context 2879 * @card: card to be checked for new dapm widgets
2915 * 2880 *
2916 * Checks the codec for any new dapm widgets and creates them if found. 2881 * Checks the codec for any new dapm widgets and creates them if found.
2917 * 2882 *
@@ -3291,6 +3256,7 @@ struct snd_soc_dapm_widget *
3291snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm, 3256snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3292 const struct snd_soc_dapm_widget *widget) 3257 const struct snd_soc_dapm_widget *widget)
3293{ 3258{
3259 enum snd_soc_dapm_direction dir;
3294 struct snd_soc_dapm_widget *w; 3260 struct snd_soc_dapm_widget *w;
3295 const char *prefix; 3261 const char *prefix;
3296 int ret; 3262 int ret;
@@ -3334,16 +3300,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3334 } 3300 }
3335 3301
3336 prefix = soc_dapm_prefix(dapm); 3302 prefix = soc_dapm_prefix(dapm);
3337 if (prefix) { 3303 if (prefix)
3338 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); 3304 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
3339 if (widget->sname) 3305 else
3340 w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix, 3306 w->name = kstrdup_const(widget->name, GFP_KERNEL);
3341 widget->sname);
3342 } else {
3343 w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
3344 if (widget->sname)
3345 w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
3346 }
3347 if (w->name == NULL) { 3307 if (w->name == NULL) {
3348 kfree(w); 3308 kfree(w);
3349 return NULL; 3309 return NULL;
@@ -3351,27 +3311,27 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3351 3311
3352 switch (w->id) { 3312 switch (w->id) {
3353 case snd_soc_dapm_mic: 3313 case snd_soc_dapm_mic:
3354 w->is_source = 1; 3314 w->is_ep = SND_SOC_DAPM_EP_SOURCE;
3355 w->power_check = dapm_generic_check_power; 3315 w->power_check = dapm_generic_check_power;
3356 break; 3316 break;
3357 case snd_soc_dapm_input: 3317 case snd_soc_dapm_input:
3358 if (!dapm->card->fully_routed) 3318 if (!dapm->card->fully_routed)
3359 w->is_source = 1; 3319 w->is_ep = SND_SOC_DAPM_EP_SOURCE;
3360 w->power_check = dapm_generic_check_power; 3320 w->power_check = dapm_generic_check_power;
3361 break; 3321 break;
3362 case snd_soc_dapm_spk: 3322 case snd_soc_dapm_spk:
3363 case snd_soc_dapm_hp: 3323 case snd_soc_dapm_hp:
3364 w->is_sink = 1; 3324 w->is_ep = SND_SOC_DAPM_EP_SINK;
3365 w->power_check = dapm_generic_check_power; 3325 w->power_check = dapm_generic_check_power;
3366 break; 3326 break;
3367 case snd_soc_dapm_output: 3327 case snd_soc_dapm_output:
3368 if (!dapm->card->fully_routed) 3328 if (!dapm->card->fully_routed)
3369 w->is_sink = 1; 3329 w->is_ep = SND_SOC_DAPM_EP_SINK;
3370 w->power_check = dapm_generic_check_power; 3330 w->power_check = dapm_generic_check_power;
3371 break; 3331 break;
3372 case snd_soc_dapm_vmid: 3332 case snd_soc_dapm_vmid:
3373 case snd_soc_dapm_siggen: 3333 case snd_soc_dapm_siggen:
3374 w->is_source = 1; 3334 w->is_ep = SND_SOC_DAPM_EP_SOURCE;
3375 w->power_check = dapm_always_on_check_power; 3335 w->power_check = dapm_always_on_check_power;
3376 break; 3336 break;
3377 case snd_soc_dapm_mux: 3337 case snd_soc_dapm_mux:
@@ -3405,14 +3365,14 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3405 } 3365 }
3406 3366
3407 w->dapm = dapm; 3367 w->dapm = dapm;
3408 INIT_LIST_HEAD(&w->sources);
3409 INIT_LIST_HEAD(&w->sinks);
3410 INIT_LIST_HEAD(&w->list); 3368 INIT_LIST_HEAD(&w->list);
3411 INIT_LIST_HEAD(&w->dirty); 3369 INIT_LIST_HEAD(&w->dirty);
3412 list_add_tail(&w->list, &dapm->card->widgets); 3370 list_add_tail(&w->list, &dapm->card->widgets);
3413 3371
3414 w->inputs = -1; 3372 snd_soc_dapm_for_each_direction(dir) {
3415 w->outputs = -1; 3373 INIT_LIST_HEAD(&w->edges[dir]);
3374 w->endpoints[dir] = -1;
3375 }
3416 3376
3417 /* machine layer set ups unconnected pins and insertions */ 3377 /* machine layer set ups unconnected pins and insertions */
3418 w->connected = 1; 3378 w->connected = 1;
@@ -3466,19 +3426,17 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
3466 int ret; 3426 int ret;
3467 3427
3468 if (WARN_ON(!config) || 3428 if (WARN_ON(!config) ||
3469 WARN_ON(list_empty(&w->sources) || list_empty(&w->sinks))) 3429 WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) ||
3430 list_empty(&w->edges[SND_SOC_DAPM_DIR_IN])))
3470 return -EINVAL; 3431 return -EINVAL;
3471 3432
3472 /* We only support a single source and sink, pick the first */ 3433 /* We only support a single source and sink, pick the first */
3473 source_p = list_first_entry(&w->sources, struct snd_soc_dapm_path, 3434 source_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_OUT],
3474 list_sink); 3435 struct snd_soc_dapm_path,
3475 sink_p = list_first_entry(&w->sinks, struct snd_soc_dapm_path, 3436 list_node[SND_SOC_DAPM_DIR_OUT]);
3476 list_source); 3437 sink_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_IN],
3477 3438 struct snd_soc_dapm_path,
3478 if (WARN_ON(!source_p || !sink_p) || 3439 list_node[SND_SOC_DAPM_DIR_IN]);
3479 WARN_ON(!sink_p->source || !source_p->sink) ||
3480 WARN_ON(!source_p->source || !sink_p->sink))
3481 return -EINVAL;
3482 3440
3483 source = source_p->source->priv; 3441 source = source_p->source->priv;
3484 sink = sink_p->sink->priv; 3442 sink = sink_p->sink->priv;
@@ -3792,7 +3750,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
3792 break; 3750 break;
3793 } 3751 }
3794 3752
3795 if (!w->sname || !strstr(w->sname, dai_w->name)) 3753 if (!w->sname || !strstr(w->sname, dai_w->sname))
3796 continue; 3754 continue;
3797 3755
3798 if (dai_w->id == snd_soc_dapm_dai_in) { 3756 if (dai_w->id == snd_soc_dapm_dai_in) {
@@ -3820,11 +3778,6 @@ static void dapm_connect_dai_link_widgets(struct snd_soc_card *card,
3820 for (i = 0; i < rtd->num_codecs; i++) { 3778 for (i = 0; i < rtd->num_codecs; i++) {
3821 struct snd_soc_dai *codec_dai = rtd->codec_dais[i]; 3779 struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
3822 3780
3823 /* there is no point in connecting BE DAI links with dummies */
3824 if (snd_soc_dai_is_dummy(codec_dai) ||
3825 snd_soc_dai_is_dummy(cpu_dai))
3826 continue;
3827
3828 /* connect BE DAI playback if widgets are valid */ 3781 /* connect BE DAI playback if widgets are valid */
3829 if (codec_dai->playback_widget && cpu_dai->playback_widget) { 3782 if (codec_dai->playback_widget && cpu_dai->playback_widget) {
3830 source = cpu_dai->playback_widget; 3783 source = cpu_dai->playback_widget;
@@ -3855,6 +3808,7 @@ static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream,
3855 int event) 3808 int event)
3856{ 3809{
3857 struct snd_soc_dapm_widget *w; 3810 struct snd_soc_dapm_widget *w;
3811 unsigned int ep;
3858 3812
3859 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 3813 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
3860 w = dai->playback_widget; 3814 w = dai->playback_widget;
@@ -3864,12 +3818,22 @@ static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream,
3864 if (w) { 3818 if (w) {
3865 dapm_mark_dirty(w, "stream event"); 3819 dapm_mark_dirty(w, "stream event");
3866 3820
3821 if (w->id == snd_soc_dapm_dai_in) {
3822 ep = SND_SOC_DAPM_EP_SOURCE;
3823 dapm_widget_invalidate_input_paths(w);
3824 } else {
3825 ep = SND_SOC_DAPM_EP_SINK;
3826 dapm_widget_invalidate_output_paths(w);
3827 }
3828
3867 switch (event) { 3829 switch (event) {
3868 case SND_SOC_DAPM_STREAM_START: 3830 case SND_SOC_DAPM_STREAM_START:
3869 w->active = 1; 3831 w->active = 1;
3832 w->is_ep = ep;
3870 break; 3833 break;
3871 case SND_SOC_DAPM_STREAM_STOP: 3834 case SND_SOC_DAPM_STREAM_STOP:
3872 w->active = 0; 3835 w->active = 0;
3836 w->is_ep = 0;
3873 break; 3837 break;
3874 case SND_SOC_DAPM_STREAM_SUSPEND: 3838 case SND_SOC_DAPM_STREAM_SUSPEND:
3875 case SND_SOC_DAPM_STREAM_RESUME: 3839 case SND_SOC_DAPM_STREAM_RESUME:
@@ -3877,14 +3841,6 @@ static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream,
3877 case SND_SOC_DAPM_STREAM_PAUSE_RELEASE: 3841 case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
3878 break; 3842 break;
3879 } 3843 }
3880
3881 if (w->id == snd_soc_dapm_dai_in) {
3882 w->is_source = w->active;
3883 dapm_widget_invalidate_input_paths(w);
3884 } else {
3885 w->is_sink = w->active;
3886 dapm_widget_invalidate_output_paths(w);
3887 }
3888 } 3844 }
3889} 3845}
3890 3846
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 256b9c91aa94..70e4b9d8bdcd 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1231,24 +1231,17 @@ static int widget_in_list(struct snd_soc_dapm_widget_list *list,
1231} 1231}
1232 1232
1233int dpcm_path_get(struct snd_soc_pcm_runtime *fe, 1233int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
1234 int stream, struct snd_soc_dapm_widget_list **list_) 1234 int stream, struct snd_soc_dapm_widget_list **list)
1235{ 1235{
1236 struct snd_soc_dai *cpu_dai = fe->cpu_dai; 1236 struct snd_soc_dai *cpu_dai = fe->cpu_dai;
1237 struct snd_soc_dapm_widget_list *list;
1238 int paths; 1237 int paths;
1239 1238
1240 list = kzalloc(sizeof(struct snd_soc_dapm_widget_list) +
1241 sizeof(struct snd_soc_dapm_widget *), GFP_KERNEL);
1242 if (list == NULL)
1243 return -ENOMEM;
1244
1245 /* get number of valid DAI paths and their widgets */ 1239 /* get number of valid DAI paths and their widgets */
1246 paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, &list); 1240 paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list);
1247 1241
1248 dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths, 1242 dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
1249 stream ? "capture" : "playback"); 1243 stream ? "capture" : "playback");
1250 1244
1251 *list_ = list;
1252 return paths; 1245 return paths;
1253} 1246}
1254 1247
@@ -1306,7 +1299,12 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
1306 1299
1307 switch (list->widgets[i]->id) { 1300 switch (list->widgets[i]->id) {
1308 case snd_soc_dapm_dai_in: 1301 case snd_soc_dapm_dai_in:
1302 if (stream != SNDRV_PCM_STREAM_PLAYBACK)
1303 continue;
1304 break;
1309 case snd_soc_dapm_dai_out: 1305 case snd_soc_dapm_dai_out:
1306 if (stream != SNDRV_PCM_STREAM_CAPTURE)
1307 continue;
1310 break; 1308 break;
1311 default: 1309 default:
1312 continue; 1310 continue;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d0960683c409..f4e92d35316e 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
33#include <sound/soc.h> 33#include <sound/soc.h>
34#include <sound/soc-dapm.h> 34#include <sound/soc-dapm.h>
35#include <sound/soc-topology.h> 35#include <sound/soc-topology.h>
36#include <sound/tlv.h>
36 37
37/* 38/*
38 * We make several passes over the data (since it won't necessarily be ordered) 39 * We make several passes over the data (since it won't necessarily be ordered)
@@ -144,7 +145,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
144 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe, 145 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
145 snd_soc_put_strobe, NULL}, 146 snd_soc_put_strobe, NULL},
146 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw, 147 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
147 snd_soc_dapm_put_volsw, NULL}, 148 snd_soc_dapm_put_volsw, snd_soc_info_volsw},
148 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double, 149 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
149 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double}, 150 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
150 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double, 151 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
534 k->put = bops[i].put; 535 k->put = bops[i].put;
535 if (k->get == NULL && bops[i].id == hdr->ops.get) 536 if (k->get == NULL && bops[i].id == hdr->ops.get)
536 k->get = bops[i].get; 537 k->get = bops[i].get;
537 if (k->info == NULL && ops[i].id == hdr->ops.info) 538 if (k->info == NULL && bops[i].id == hdr->ops.info)
538 k->info = bops[i].info; 539 k->info = bops[i].info;
539 } 540 }
540 541
@@ -579,29 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
579 return 0; 580 return 0;
580} 581}
581 582
583
584static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
585 struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
586{
587 unsigned int item_len = 2 * sizeof(unsigned int);
588 unsigned int *p;
589
590 p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
591 if (!p)
592 return -ENOMEM;
593
594 p[0] = SNDRV_CTL_TLVT_DB_SCALE;
595 p[1] = item_len;
596 p[2] = scale->min;
597 p[3] = (scale->step & TLV_DB_SCALE_MASK)
598 | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
599
600 kc->tlv.p = (void *)p;
601 return 0;
602}
603
582static int soc_tplg_create_tlv(struct soc_tplg *tplg, 604static int soc_tplg_create_tlv(struct soc_tplg *tplg,
583 struct snd_kcontrol_new *kc, u32 tlv_size) 605 struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
584{ 606{
585 struct snd_soc_tplg_ctl_tlv *tplg_tlv; 607 struct snd_soc_tplg_ctl_tlv *tplg_tlv;
586 struct snd_ctl_tlv *tlv;
587 608
588 if (tlv_size == 0) 609 if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
589 return 0; 610 return 0;
590 611
591 tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos; 612 if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
592 tplg->pos += tlv_size; 613 kc->tlv.c = snd_soc_bytes_tlv_callback;
593 614 } else {
594 tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL); 615 tplg_tlv = &tc->tlv;
595 if (tlv == NULL) 616 switch (tplg_tlv->type) {
596 return -ENOMEM; 617 case SNDRV_CTL_TLVT_DB_SCALE:
597 618 return soc_tplg_create_tlv_db_scale(tplg, kc,
598 dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n", 619 &tplg_tlv->scale);
599 tplg_tlv->numid, tplg_tlv->size);
600 620
601 tlv->numid = tplg_tlv->numid; 621 /* TODO: add support for other TLV types */
602 tlv->length = tplg_tlv->size; 622 default:
603 memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size); 623 dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
604 kc->tlv.p = (void *)tlv; 624 tplg_tlv->type);
625 return -EINVAL;
626 }
627 }
605 628
606 return 0; 629 return 0;
607} 630}
@@ -773,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
773 } 796 }
774 797
775 /* create any TLV data */ 798 /* create any TLV data */
776 soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size); 799 soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
777 800
778 /* register control here */ 801 /* register control here */
779 err = soc_tplg_add_kcontrol(tplg, &kc, 802 err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1351,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
1351 template.reg = w->reg; 1374 template.reg = w->reg;
1352 template.shift = w->shift; 1375 template.shift = w->shift;
1353 template.mask = w->mask; 1376 template.mask = w->mask;
1377 template.subseq = w->subseq;
1354 template.on_val = w->invert ? 0 : 1; 1378 template.on_val = w->invert ? 0 : 1;
1355 template.off_val = w->invert ? 1 : 0; 1379 template.off_val = w->invert ? 1 : 0;
1356 template.ignore_suspend = w->ignore_suspend; 1380 template.ignore_suspend = w->ignore_suspend;
@@ -1734,7 +1758,6 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
1734 u32 index) 1758 u32 index)
1735{ 1759{
1736 struct snd_soc_dapm_widget *w, *next_w; 1760 struct snd_soc_dapm_widget *w, *next_w;
1737 struct snd_soc_dapm_path *p, *next_p;
1738 1761
1739 list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) { 1762 list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
1740 1763
@@ -1746,31 +1769,9 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm,
1746 if (w->dobj.index != index && 1769 if (w->dobj.index != index &&
1747 w->dobj.index != SND_SOC_TPLG_INDEX_ALL) 1770 w->dobj.index != SND_SOC_TPLG_INDEX_ALL)
1748 continue; 1771 continue;
1749
1750 list_del(&w->list);
1751
1752 /*
1753 * remove source and sink paths associated to this widget.
1754 * While removing the path, remove reference to it from both
1755 * source and sink widgets so that path is removed only once.
1756 */
1757 list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
1758 list_del(&p->list_sink);
1759 list_del(&p->list_source);
1760 list_del(&p->list);
1761 kfree(p);
1762 }
1763 list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
1764 list_del(&p->list_sink);
1765 list_del(&p->list_source);
1766 list_del(&p->list);
1767 kfree(p);
1768 }
1769 /* check and free any dynamic widget kcontrols */ 1772 /* check and free any dynamic widget kcontrols */
1770 snd_soc_tplg_widget_remove(w); 1773 snd_soc_tplg_widget_remove(w);
1771 kfree(w->kcontrols); 1774 snd_soc_dapm_free_widget(w);
1772 kfree(w->name);
1773 kfree(w);
1774 } 1775 }
1775} 1776}
1776EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all); 1777EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index a4028601da01..977a078eb92f 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -203,35 +203,25 @@ static int spdif_in_probe(struct platform_device *pdev)
203 struct spdif_in_dev *host; 203 struct spdif_in_dev *host;
204 struct spear_spdif_platform_data *pdata; 204 struct spear_spdif_platform_data *pdata;
205 struct resource *res, *res_fifo; 205 struct resource *res, *res_fifo;
206 void __iomem *io_base;
206 int ret; 207 int ret;
207 208
208 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 209 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
209 if (!res) 210 io_base = devm_ioremap_resource(&pdev->dev, res);
210 return -EINVAL; 211 if (IS_ERR(io_base))
212 return PTR_ERR(io_base);
211 213
212 res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0); 214 res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0);
213 if (!res_fifo) 215 if (!res_fifo)
214 return -EINVAL; 216 return -EINVAL;
215 217
216 if (!devm_request_mem_region(&pdev->dev, res->start,
217 resource_size(res), pdev->name)) {
218 dev_warn(&pdev->dev, "Failed to get memory resource\n");
219 return -ENOENT;
220 }
221
222 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); 218 host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
223 if (!host) { 219 if (!host) {
224 dev_warn(&pdev->dev, "kzalloc fail\n"); 220 dev_warn(&pdev->dev, "kzalloc fail\n");
225 return -ENOMEM; 221 return -ENOMEM;
226 } 222 }
227 223
228 host->io_base = devm_ioremap(&pdev->dev, res->start, 224 host->io_base = io_base;
229 resource_size(res));
230 if (!host->io_base) {
231 dev_warn(&pdev->dev, "ioremap failed\n");
232 return -ENOMEM;
233 }
234
235 host->irq = platform_get_irq(pdev, 0); 225 host->irq = platform_get_irq(pdev, 0);
236 if (host->irq < 0) 226 if (host->irq < 0)
237 return -EINVAL; 227 return -EINVAL;
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index a7dc3c56f44d..e8476da157cd 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -44,7 +44,7 @@ int devm_spear_pcm_platform_register(struct device *dev,
44 *config = spear_dmaengine_pcm_config; 44 *config = spear_dmaengine_pcm_config;
45 config->compat_filter_fn = filter; 45 config->compat_filter_fn = filter;
46 46
47 return snd_dmaengine_pcm_register(dev, config, 47 return devm_snd_dmaengine_pcm_register(dev, config,
48 SND_DMAENGINE_PCM_FLAG_NO_DT | 48 SND_DMAENGINE_PCM_FLAG_NO_DT |
49 SND_DMAENGINE_PCM_FLAG_COMPAT); 49 SND_DMAENGINE_PCM_FLAG_COMPAT);
50} 50}
diff --git a/sound/soc/sti/Kconfig b/sound/soc/sti/Kconfig
new file mode 100644
index 000000000000..64a690077023
--- /dev/null
+++ b/sound/soc/sti/Kconfig
@@ -0,0 +1,11 @@
1#
2# STM SoC audio configuration
3#
4menuconfig SND_SOC_STI
5 tristate "SoC Audio support for STI System-On-Chip"
6 depends on SND_SOC
7 depends on ARCH_STI || COMPILE_TEST
8 select SND_SOC_GENERIC_DMAENGINE_PCM
9 help
10 Say Y if you want to enable ASoC support for
11 any of the STI platforms (e.g. STIH416).
diff --git a/sound/soc/sti/Makefile b/sound/soc/sti/Makefile
new file mode 100644
index 000000000000..4b188d2d76b8
--- /dev/null
+++ b/sound/soc/sti/Makefile
@@ -0,0 +1,4 @@
1# STI platform support
2snd-soc-sti-objs := sti_uniperif.o uniperif_player.o uniperif_reader.o
3
4obj-$(CONFIG_SND_SOC_STI) += snd-soc-sti.o
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
new file mode 100644
index 000000000000..39bcefe5eea0
--- /dev/null
+++ b/sound/soc/sti/sti_uniperif.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2015
3 * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
4 * for STMicroelectronics.
5 * License terms: GNU General Public License (GPL), version 2
6 */
7
8#include <linux/module.h>
9#include <linux/pinctrl/consumer.h>
10
11#include "uniperif.h"
12
13/*
14 * sti_uniperiph_dai_create_ctrl
15 * This function creates the controls associated with the DAI and the PCM device.
16 * The request is done by the front end to associate a ctrl with a PCM device id.
17 */
18static int sti_uniperiph_dai_create_ctrl(struct snd_soc_dai *dai)
19{
20 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
21 struct uniperif *uni = priv->dai_data.uni;
22 struct snd_kcontrol_new *ctrl;
23 int i;
24
25 if (!uni->num_ctrls)
26 return 0;
27
28 for (i = 0; i < uni->num_ctrls; i++) {
29 /*
30 * Several controls can have the same name. Controls are indexed on
31 * the uniperipheral instance ID.
32 */
33 ctrl = &uni->snd_ctrls[i];
34 ctrl->index = uni->info->id;
35 ctrl->device = uni->info->id;
36 }
37
38 return snd_soc_add_dai_controls(dai, uni->snd_ctrls, uni->num_ctrls);
39}
40
41/*
42 * DAI
43 */
44int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
45 struct snd_pcm_hw_params *params,
46 struct snd_soc_dai *dai)
47{
48 struct snd_dmaengine_dai_dma_data *dma_data;
49 int transfer_size;
50
51 transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
52
53 dma_data = snd_soc_dai_get_dma_data(dai, substream);
54 dma_data->maxburst = transfer_size;
55
56 return 0;
57}
58
59int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
60{
61 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
62
63 priv->dai_data.uni->daifmt = fmt;
64
65 return 0;
66}
67
68static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
69{
70 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
71 struct uniperif *uni = priv->dai_data.uni;
72 int ret;
73
74 /* The uniperipheral should be in stopped state */
75 if (uni->state != UNIPERIF_STATE_STOPPED) {
76 dev_err(uni->dev, "%s: invalid uni state( %d)",
77 __func__, (int)uni->state);
78 return -EBUSY;
79 }
80
81 /* Pinctrl: switch pinstate to sleep */
82 ret = pinctrl_pm_select_sleep_state(uni->dev);
83 if (ret)
84 dev_err(uni->dev, "%s: failed to select pinctrl state",
85 __func__);
86
87 return ret;
88}
89
90static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
91{
92 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
93 struct uniperif *uni = priv->dai_data.uni;
94 int ret;
95
96 if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player")) {
97 ret = uni_player_resume(uni);
98 if (ret)
99 return ret;
100 }
101
102 /* pinctrl: switch pinstate to default */
103 ret = pinctrl_pm_select_default_state(uni->dev);
104 if (ret)
105 dev_err(uni->dev, "%s: failed to select pinctrl state",
106 __func__);
107
108 return ret;
109}
110
111static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
112{
113 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
114 struct sti_uniperiph_dai *dai_data = &priv->dai_data;
115
116 /* DMA settings */
117 if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player"))
118 snd_soc_dai_init_dma_data(dai, &dai_data->dma_data, NULL);
119 else
120 snd_soc_dai_init_dma_data(dai, NULL, &dai_data->dma_data);
121
122 dai_data->dma_data.addr = dai_data->uni->fifo_phys_address;
123 dai_data->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
124
125 return sti_uniperiph_dai_create_ctrl(dai);
126}
127
128static const struct snd_soc_dai_driver sti_uniperiph_dai_template = {
129 .probe = sti_uniperiph_dai_probe,
130 .suspend = sti_uniperiph_dai_suspend,
131 .resume = sti_uniperiph_dai_resume
132};
133
134static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
135 .name = "sti_cpu_dai",
136};
137
138static int sti_uniperiph_cpu_dai_of(struct device_node *node,
139 struct sti_uniperiph_data *priv)
140{
141 const char *str;
142 int ret;
143 struct device *dev = &priv->pdev->dev;
144 struct sti_uniperiph_dai *dai_data = &priv->dai_data;
145 struct snd_soc_dai_driver *dai = priv->dai;
146 struct snd_soc_pcm_stream *stream;
147 struct uniperif *uni;
148
149 uni = devm_kzalloc(dev, sizeof(*uni), GFP_KERNEL);
150 if (!uni)
151 return -ENOMEM;
152
153 *dai = sti_uniperiph_dai_template;
154 ret = of_property_read_string(node, "dai-name", &str);
155 if (ret < 0) {
156 dev_err(dev, "%s: dai name missing.\n", __func__);
157 return -EINVAL;
158 }
159 dai->name = str;
160
161 /* Get resources */
162 uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
163
164 if (!uni->mem_region) {
165 dev_err(dev, "Failed to get memory resource");
166 return -ENODEV;
167 }
168
169 uni->base = devm_ioremap_resource(dev, uni->mem_region);
170
171 if (IS_ERR(uni->base))
172 return PTR_ERR(uni->base);
173
174 uni->fifo_phys_address = uni->mem_region->start +
175 UNIPERIF_FIFO_DATA_OFFSET(uni);
176
177 uni->irq = platform_get_irq(priv->pdev, 0);
178 if (uni->irq < 0) {
179 dev_err(dev, "Failed to get IRQ resource");
180 return -ENXIO;
181 }
182
183 dai_data->uni = uni;
184
185 if (of_device_is_compatible(node, "st,sti-uni-player")) {
186 uni_player_init(priv->pdev, uni);
187 stream = &dai->playback;
188 } else {
189 uni_reader_init(priv->pdev, uni);
190 stream = &dai->capture;
191 }
192 dai->ops = uni->dai_ops;
193
194 stream->stream_name = dai->name;
195 stream->channels_min = uni->hw->channels_min;
196 stream->channels_max = uni->hw->channels_max;
197 stream->rates = uni->hw->rates;
198 stream->formats = uni->hw->formats;
199
200 return 0;
201}
202
203static const struct snd_dmaengine_pcm_config dmaengine_pcm_config = {
204 .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
205};
206
207static int sti_uniperiph_probe(struct platform_device *pdev)
208{
209 struct sti_uniperiph_data *priv;
210 struct device_node *node = pdev->dev.of_node;
211 int ret;
212
213 /* Allocate the private data and the CPU DAI structure */
214 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
215 if (!priv)
216 return -ENOMEM;
217 priv->dai = devm_kzalloc(&pdev->dev, sizeof(*priv->dai), GFP_KERNEL);
218 if (!priv->dai)
219 return -ENOMEM;
220
221 priv->pdev = pdev;
222
223 ret = sti_uniperiph_cpu_dai_of(node, priv);
 if (ret < 0)
 return ret;
224
225 dev_set_drvdata(&pdev->dev, priv);
226
227 ret = devm_snd_soc_register_component(&pdev->dev,
228 &sti_uniperiph_dai_component,
229 priv->dai, 1);
230 if (ret < 0)
231 return ret;
232
233 return devm_snd_dmaengine_pcm_register(&pdev->dev,
234 &dmaengine_pcm_config, 0);
235}
236
237static const struct of_device_id snd_soc_sti_match[] = {
238 { .compatible = "st,sti-uni-player", },
239 { .compatible = "st,sti-uni-reader", },
240 {},
241};
242
243static struct platform_driver sti_uniperiph_driver = {
244 .driver = {
245 .name = "sti-uniperiph-dai",
246 .of_match_table = snd_soc_sti_match,
247 },
248 .probe = sti_uniperiph_probe,
249};
250module_platform_driver(sti_uniperiph_driver);
251
252MODULE_DESCRIPTION("uniperipheral DAI driver");
253MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
254MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
new file mode 100644
index 000000000000..f0fd5a9944e9
--- /dev/null
+++ b/sound/soc/sti/uniperif.h
@@ -0,0 +1,1229 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2015
3 * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
4 * for STMicroelectronics.
5 * License terms: GNU General Public License (GPL), version 2
6 */
7
8#ifndef __SND_ST_AUD_UNIPERIF_H
9#define __SND_ST_AUD_UNIPERIF_H
10
11#include <linux/regmap.h>
12
13#include <sound/dmaengine_pcm.h>
14
15/*
16 * Register access macros
17 */
18
19#define GET_UNIPERIF_REG(ip, offset, shift, mask) \
20 ((readl_relaxed(ip->base + offset) >> shift) & mask)
21#define SET_UNIPERIF_REG(ip, offset, shift, mask, value) \
22 writel_relaxed(((readl_relaxed(ip->base + offset) & \
23 ~(mask << shift)) | (((value) & mask) << shift)), ip->base + offset)
24#define SET_UNIPERIF_BIT_REG(ip, offset, shift, mask, value) \
25 writel_relaxed((((value) & mask) << shift), ip->base + offset)
26
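/*
 * Note: for register fields that do not exist on a given IP revision, the
 * *_SHIFT() helpers below evaluate to -1 (and some *_MASK() helpers to 0);
 * these accessors are presumably only meant to be used on revisions that
 * actually implement the field (see the (ip)->ver checks).
 */
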
27/*
28 * AUD_UNIPERIF_SOFT_RST reg
29 */
30
31#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
32#define GET_UNIPERIF_SOFT_RST(ip) \
33 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
34 readl_relaxed(ip->base + UNIPERIF_SOFT_RST_OFFSET(ip)) : 0)
35#define SET_UNIPERIF_SOFT_RST(ip, value) \
36 writel_relaxed(value, ip->base + UNIPERIF_SOFT_RST_OFFSET(ip))
37
38/* SOFT_RST */
39#define UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip) 0x0
40#define UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip) 0x1
41#define SET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
42 SET_UNIPERIF_BIT_REG(ip, \
43 UNIPERIF_SOFT_RST_OFFSET(ip), \
44 UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
45 UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip), 1)
46#define GET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
47 GET_UNIPERIF_REG(ip, \
48 UNIPERIF_SOFT_RST_OFFSET(ip), \
49 UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
50 UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
51
52/*
53 * AUD_UNIPERIF_FIFO_DATA reg
54 */
55
56#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
57#define SET_UNIPERIF_DATA(ip, value) \
58 writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
59
60/*
61 * AUD_UNIPERIF_CHANNEL_STA_REGN reg
62 */
63
64#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
65#define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
66 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
67#define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
68 writel_relaxed(value, ip->base + \
69 UNIPERIF_CHANNEL_STA_REGN(ip, n))
70
71#define UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip) 0x0060
72#define GET_UNIPERIF_CHANNEL_STA_REG0(ip) \
73 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
74#define SET_UNIPERIF_CHANNEL_STA_REG0(ip, value) \
75 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
76
77#define UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip) 0x0064
78#define GET_UNIPERIF_CHANNEL_STA_REG1(ip) \
79 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
80#define SET_UNIPERIF_CHANNEL_STA_REG1(ip, value) \
81 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
82
83#define UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip) 0x0068
84#define GET_UNIPERIF_CHANNEL_STA_REG2(ip) \
85 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
86#define SET_UNIPERIF_CHANNEL_STA_REG2(ip, value) \
87 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
88
89#define UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip) 0x006C
90#define GET_UNIPERIF_CHANNEL_STA_REG3(ip) \
91 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
92#define SET_UNIPERIF_CHANNEL_STA_REG3(ip, value) \
93 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
94
95#define UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip) 0x0070
96#define GET_UNIPERIF_CHANNEL_STA_REG4(ip) \
97 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
98#define SET_UNIPERIF_CHANNEL_STA_REG4(ip, value) \
99 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
100
101#define UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip) 0x0074
102#define GET_UNIPERIF_CHANNEL_STA_REG5(ip) \
103 readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
104#define SET_UNIPERIF_CHANNEL_STA_REG5(ip, value) \
105 writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
106
107/*
108 * AUD_UNIPERIF_ITS reg
109 */
110
111#define UNIPERIF_ITS_OFFSET(ip) 0x000C
112#define GET_UNIPERIF_ITS(ip) \
113 readl_relaxed(ip->base + UNIPERIF_ITS_OFFSET(ip))
114
115/* MEM_BLK_READ */
116#define UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip) 5
117#define UNIPERIF_ITS_MEM_BLK_READ_MASK(ip) \
118 (BIT(UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip)))
119
120/* FIFO_ERROR */
121#define UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip) \
122 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
123#define UNIPERIF_ITS_FIFO_ERROR_MASK(ip) \
124 (BIT(UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip)))
125
126/* DMA_ERROR */
127#define UNIPERIF_ITS_DMA_ERROR_SHIFT(ip) 9
128#define UNIPERIF_ITS_DMA_ERROR_MASK(ip) \
129 (BIT(UNIPERIF_ITS_DMA_ERROR_SHIFT(ip)))
130
131/* UNDERFLOW_REC_DONE */
132#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip) \
133 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
134#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_MASK(ip) \
135 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
136 0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip))))
137
138/* UNDERFLOW_REC_FAILED */
139#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip) \
140 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
141#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_MASK(ip) \
142 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
143 0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
144
145/*
146 * AUD_UNIPERIF_ITS_BCLR reg
147 */
148
149/* FIFO_ERROR */
150#define UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip) \
151 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
152#define UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip) \
153 (BIT(UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip)))
154#define SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(ip) \
155 SET_UNIPERIF_ITS_BCLR(ip, \
156 UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip))
157
158#define UNIPERIF_ITS_BCLR_OFFSET(ip) 0x0010
159#define SET_UNIPERIF_ITS_BCLR(ip, value) \
160 writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
161
162/*
163 * AUD_UNIPERIF_ITM reg
164 */
165
166#define UNIPERIF_ITM_OFFSET(ip) 0x0018
167#define GET_UNIPERIF_ITM(ip) \
168 readl_relaxed(ip->base + UNIPERIF_ITM_OFFSET(ip))
169
170/* FIFO_ERROR */
171#define UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip) \
172 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
173#define UNIPERIF_ITM_FIFO_ERROR_MASK(ip) \
174 (BIT(UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip)))
175
176/* UNDERFLOW_REC_DONE */
177#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip) \
178 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
179#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(ip) \
180 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
181 0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip))))
182
183/* UNDERFLOW_REC_FAILED */
184#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip) \
185 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
186#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(ip) \
187 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
188 0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
189
190/*
191 * AUD_UNIPERIF_ITM_BCLR reg
192 */
193
194#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
195#define SET_UNIPERIF_ITM_BCLR(ip, value) \
196 writel_relaxed(value, ip->base + UNIPERIF_ITM_BCLR_OFFSET(ip))
197
198/* FIFO_ERROR */
199#define UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip) \
200 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
201#define UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip) \
202 (BIT(UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip)))
203#define SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(ip) \
204 SET_UNIPERIF_ITM_BCLR(ip, \
205 UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip))
206
207/* DMA_ERROR */
208#define UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip) 9
209#define UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip) \
210 (BIT(UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip)))
211#define SET_UNIPERIF_ITM_BCLR_DMA_ERROR(ip) \
212 SET_UNIPERIF_ITM_BCLR(ip, \
213 UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
214
215/*
216 * AUD_UNIPERIF_ITM_BSET reg
217 */
218
219#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
220#define SET_UNIPERIF_ITM_BSET(ip, value) \
221 writel_relaxed(value, ip->base + UNIPERIF_ITM_BSET_OFFSET(ip))
222
223/* FIFO_ERROR */
224#define UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip) \
225 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
226#define UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip) \
227 (BIT(UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip)))
228#define SET_UNIPERIF_ITM_BSET_FIFO_ERROR(ip) \
229 SET_UNIPERIF_ITM_BSET(ip, \
230 UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip))
231
232/* MEM_BLK_READ */
233#define UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip) 5
234#define UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip) \
235 (BIT(UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip)))
236#define SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(ip) \
237 SET_UNIPERIF_ITM_BSET(ip, \
238 UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip))
239
240/* DMA_ERROR */
241#define UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip) 9
242#define UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip) \
243 (BIT(UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip)))
244#define SET_UNIPERIF_ITM_BSET_DMA_ERROR(ip) \
245 SET_UNIPERIF_ITM_BSET(ip, \
246 UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip))
247
248/* UNDERFLOW_REC_DONE */
249#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip) \
250 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
251#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip) \
252 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
253 0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip))))
254#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(ip) \
255 SET_UNIPERIF_ITM_BSET(ip, \
256 UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip))
257
258/* UNDERFLOW_REC_FAILED */
259#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip) \
260 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
261#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip) \
262 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
263 0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip))))
264#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(ip) \
265 SET_UNIPERIF_ITM_BSET(ip, \
266 UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip))
267
268/*
269 * UNIPERIF_CONFIG reg
270 */
271
272#define UNIPERIF_CONFIG_OFFSET(ip) 0x0040
273#define GET_UNIPERIF_CONFIG(ip) \
274 readl_relaxed(ip->base + UNIPERIF_CONFIG_OFFSET(ip))
275#define SET_UNIPERIF_CONFIG(ip, value) \
276 writel_relaxed(value, ip->base + UNIPERIF_CONFIG_OFFSET(ip))
277
278/* PARITY_CNTR */
279#define UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip) 0
280#define UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip) 0x1
281#define GET_UNIPERIF_CONFIG_PARITY_CNTR(ip) \
282 GET_UNIPERIF_REG(ip, \
283 UNIPERIF_CONFIG_OFFSET(ip), \
284 UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
285 UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip))
286#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(ip) \
287 SET_UNIPERIF_REG(ip, \
288 UNIPERIF_CONFIG_OFFSET(ip), \
289 UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
290 UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 0)
291#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_SW(ip) \
292 SET_UNIPERIF_REG(ip, \
293 UNIPERIF_CONFIG_OFFSET(ip), \
294 UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
295 UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 1)
296
297/* CHANNEL_STA_CNTR */
298#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip) 1
299#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip) 0x1
300#define GET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR(ip) \
301 GET_UNIPERIF_REG(ip, \
302 UNIPERIF_CONFIG_OFFSET(ip), \
303 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
304 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip))
305#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_SW(ip) \
306 SET_UNIPERIF_REG(ip, \
307 UNIPERIF_CONFIG_OFFSET(ip), \
308 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
309 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 0)
310#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(ip) \
311 SET_UNIPERIF_REG(ip, \
312 UNIPERIF_CONFIG_OFFSET(ip), \
313 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
314 UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 1)
315
316/* USER_DAT_CNTR */
317#define UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip) 2
318#define UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip) 0x1
319#define GET_UNIPERIF_CONFIG_USER_DAT_CNTR(ip) \
320 GET_UNIPERIF_REG(ip, \
321 UNIPERIF_CONFIG_OFFSET(ip), \
322 UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
323 UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip))
324#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(ip) \
325 SET_UNIPERIF_REG(ip, \
326 UNIPERIF_CONFIG_OFFSET(ip), \
327 UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
328 UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 1)
329#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_SW(ip) \
330 SET_UNIPERIF_REG(ip, \
331 UNIPERIF_CONFIG_OFFSET(ip), \
332 UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
333 UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 0)
334
335/* VALIDITY_DAT_CNTR */
336#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip) 3
337#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip) 0x1
338#define GET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR(ip) \
339 GET_UNIPERIF_REG(ip, \
340 UNIPERIF_CONFIG_OFFSET(ip), \
341 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
342 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip))
343#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_SW(ip) \
344 SET_UNIPERIF_REG(ip, \
345 UNIPERIF_CONFIG_OFFSET(ip), \
346 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
347 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 0)
348#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(ip) \
349 SET_UNIPERIF_REG(ip, \
350 UNIPERIF_CONFIG_OFFSET(ip), \
351 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
352 UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 1)
353
354/* ONE_BIT_AUD_SUPPORT */
355#define UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip) 4
356#define UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip) 0x1
357#define GET_UNIPERIF_CONFIG_ONE_BIT_AUD(ip) \
358 GET_UNIPERIF_REG(ip, \
359 UNIPERIF_CONFIG_OFFSET(ip), \
360 UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
361 UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip))
362#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(ip) \
363 SET_UNIPERIF_REG(ip, \
364 UNIPERIF_CONFIG_OFFSET(ip), \
365 UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
366 UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 0)
367#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_ENABLE(ip) \
368 SET_UNIPERIF_REG(ip, \
369 UNIPERIF_CONFIG_OFFSET(ip), \
370 UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
371 UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 1)
372
373/* MEMORY_FMT */
374#define UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip) 5
375#define UNIPERIF_CONFIG_MEM_FMT_MASK(ip) 0x1
376#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) 0
377#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) 1
378#define GET_UNIPERIF_CONFIG_MEM_FMT(ip) \
379 GET_UNIPERIF_REG(ip, \
380 UNIPERIF_CONFIG_OFFSET(ip), \
381 UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
382 UNIPERIF_CONFIG_MEM_FMT_MASK(ip))
383#define SET_UNIPERIF_CONFIG_MEM_FMT(ip, value) \
384 SET_UNIPERIF_REG(ip, \
385 UNIPERIF_CONFIG_OFFSET(ip), \
386 UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
387 UNIPERIF_CONFIG_MEM_FMT_MASK(ip), value)
388#define SET_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) \
389 SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
390 VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip))
391#define SET_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) \
392 SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
393 VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip))
394
395/* REPEAT_CHL_STS */
396#define UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip) 6
397#define UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip) 0x1
398#define GET_UNIPERIF_CONFIG_REPEAT_CHL_STS(ip) \
399 GET_UNIPERIF_REG(ip, \
400 UNIPERIF_CONFIG_OFFSET(ip), \
401 UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
402 UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip))
403#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(ip) \
404 SET_UNIPERIF_REG(ip, \
405 UNIPERIF_CONFIG_OFFSET(ip), \
406 UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
407 UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 0)
408#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_DISABLE(ip) \
409 SET_UNIPERIF_REG(ip, \
410 UNIPERIF_CONFIG_OFFSET(ip), \
411 UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
412 UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 1)
413
414/* BACK_STALL_REQ */
415#define UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip) \
416 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 7 : -1)
417#define UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip) 0x1
418#define GET_UNIPERIF_CONFIG_BACK_STALL_REQ(ip) \
419 GET_UNIPERIF_REG(ip, \
420 UNIPERIF_CONFIG_OFFSET(ip), \
421 UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
422 UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip))
423#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(ip) \
424 SET_UNIPERIF_REG(ip, \
425 UNIPERIF_CONFIG_OFFSET(ip), \
426 UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
427 UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 0)
428#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_ENABLE(ip) \
429 SET_UNIPERIF_REG(ip, \
430 UNIPERIF_CONFIG_OFFSET(ip), \
431 UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
432 UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 1)
433
434/* FDMA_TRIGGER_LIMIT */
435#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip) 8
436#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip) 0x7F
437#define GET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip) \
438 GET_UNIPERIF_REG(ip, \
439 UNIPERIF_CONFIG_OFFSET(ip), \
440 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
441 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip))
442#define SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip, value) \
443 SET_UNIPERIF_REG(ip, \
444 UNIPERIF_CONFIG_OFFSET(ip), \
445 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
446 UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip), value)
447
448/* CHL_STS_UPDATE */
449#define UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip) \
450 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
451#define UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip) 0x1
452#define GET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
453 GET_UNIPERIF_REG(ip, \
454 UNIPERIF_CONFIG_OFFSET(ip), \
455 UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
456 UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip))
457#define SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
458 SET_UNIPERIF_REG(ip, \
459 UNIPERIF_CONFIG_OFFSET(ip), \
460 UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
461 UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip), 1)
462
463/* IDLE_MOD */
464#define UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip) 18
465#define UNIPERIF_CONFIG_IDLE_MOD_MASK(ip) 0x1
466#define GET_UNIPERIF_CONFIG_IDLE_MOD(ip) \
467 GET_UNIPERIF_REG(ip, \
468 UNIPERIF_CONFIG_OFFSET(ip), \
469 UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
470 UNIPERIF_CONFIG_IDLE_MOD_MASK(ip))
471#define SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(ip) \
472 SET_UNIPERIF_REG(ip, \
473 UNIPERIF_CONFIG_OFFSET(ip), \
474 UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
475 UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 0)
476#define SET_UNIPERIF_CONFIG_IDLE_MOD_ENABLE(ip) \
477 SET_UNIPERIF_REG(ip, \
478 UNIPERIF_CONFIG_OFFSET(ip), \
479 UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
480 UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 1)
481
482/* SUBFRAME_SELECTION */
483#define UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip) 19
484#define UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip) 0x1
485#define GET_UNIPERIF_CONFIG_SUBFRAME_SEL(ip) \
486 GET_UNIPERIF_REG(ip, \
487 UNIPERIF_CONFIG_OFFSET(ip), \
488 UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
489 UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip))
490#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(ip) \
491 SET_UNIPERIF_REG(ip, \
492 UNIPERIF_CONFIG_OFFSET(ip), \
493 UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
494 UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 1)
495#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF0_SUBF1(ip) \
496 SET_UNIPERIF_REG(ip, \
497 UNIPERIF_CONFIG_OFFSET(ip), \
498 UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
499 UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 0)
500
501/* FULL_SW_CONTROL */
502#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip) 20
503#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip) 0x1
504#define GET_UNIPERIF_CONFIG_SPDIF_SW_CTRL(ip) \
505 GET_UNIPERIF_REG(ip, \
506 UNIPERIF_CONFIG_OFFSET(ip), \
507 UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
508 UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip))
509#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_ENABLE(ip) \
510 SET_UNIPERIF_REG(ip, \
511 UNIPERIF_CONFIG_OFFSET(ip), \
512 UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
513 UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 1)
514#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(ip) \
515 SET_UNIPERIF_REG(ip, \
516 UNIPERIF_CONFIG_OFFSET(ip), \
517 UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
518 UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 0)
519
520/* MASTER_CLKEDGE */
521#define UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip) \
522 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 24 : -1)
523#define UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip) 0x1
524#define GET_UNIPERIF_CONFIG_MSTR_CLKEDGE(ip) \
525 GET_UNIPERIF_REG(ip, \
526 UNIPERIF_CONFIG_OFFSET(ip), \
527 UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
528 UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip))
529#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_FALLING(ip) \
530 SET_UNIPERIF_REG(ip, \
531 UNIPERIF_CONFIG_OFFSET(ip), \
532 UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
533 UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 1)
534#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_RISING(ip) \
535 SET_UNIPERIF_REG(ip, \
536 UNIPERIF_CONFIG_OFFSET(ip), \
537 UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
538 UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 0)
539
540/*
541 * UNIPERIF_CTRL reg
542 */
543
544#define UNIPERIF_CTRL_OFFSET(ip) 0x0044
545#define GET_UNIPERIF_CTRL(ip) \
546 readl_relaxed(ip->base + UNIPERIF_CTRL_OFFSET(ip))
547#define SET_UNIPERIF_CTRL(ip, value) \
548 writel_relaxed(value, ip->base + UNIPERIF_CTRL_OFFSET(ip))
549
550/* OPERATION */
551#define UNIPERIF_CTRL_OPERATION_SHIFT(ip) 0
552#define UNIPERIF_CTRL_OPERATION_MASK(ip) 0x7
553#define GET_UNIPERIF_CTRL_OPERATION(ip) \
554 GET_UNIPERIF_REG(ip, \
555 UNIPERIF_CTRL_OFFSET(ip), \
556 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
557 UNIPERIF_CTRL_OPERATION_MASK(ip))
558#define VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip) 0
559#define SET_UNIPERIF_CTRL_OPERATION_OFF(ip) \
560 SET_UNIPERIF_REG(ip, \
561 UNIPERIF_CTRL_OFFSET(ip), \
562 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
563 UNIPERIF_CTRL_OPERATION_MASK(ip), \
564 VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip))
565#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
566 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 1 : -1)
567#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
568 SET_UNIPERIF_REG(ip, \
569 UNIPERIF_CTRL_OFFSET(ip), \
570 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
571 UNIPERIF_CTRL_OPERATION_MASK(ip), \
572 VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip))
573#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
574 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 2 : -1)
575#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
576 SET_UNIPERIF_REG(ip, \
577 UNIPERIF_CTRL_OFFSET(ip), \
578 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
579 UNIPERIF_CTRL_OPERATION_MASK(ip), \
580 VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip))
581#define VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) 3
582#define SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) \
583 SET_UNIPERIF_REG(ip, \
584 UNIPERIF_CTRL_OFFSET(ip), \
585 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
586 UNIPERIF_CTRL_OPERATION_MASK(ip), \
587 VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip))
588/* Same value as PCM_DATA above */
589#define VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) 3
590#define SET_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) \
591 SET_UNIPERIF_REG(ip, \
592 UNIPERIF_CTRL_OFFSET(ip), \
593 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
594 UNIPERIF_CTRL_OPERATION_MASK(ip), \
595 VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip))
596#define VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) 4
597#define SET_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) \
598 SET_UNIPERIF_REG(ip, \
599 UNIPERIF_CTRL_OFFSET(ip), \
600 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
601 UNIPERIF_CTRL_OPERATION_MASK(ip), \
602 VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip))
603#define VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
604 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 5 : -1)
605#define SET_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
606 SET_UNIPERIF_REG(ip, \
607 UNIPERIF_CTRL_OFFSET(ip), \
608 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
609 UNIPERIF_CTRL_OPERATION_MASK(ip), \
610 VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip))
611#define VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
612 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 7)
613#define SET_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
614 SET_UNIPERIF_REG(ip, \
615 UNIPERIF_CTRL_OFFSET(ip), \
616 UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
617 UNIPERIF_CTRL_OPERATION_MASK(ip), \
618 VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip))
619
620/* EXIT_STBY_ON_EOBLOCK */
621#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip) \
622 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 3)
623#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip) 0x1
624#define GET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK(ip) \
625 GET_UNIPERIF_REG(ip, \
626 UNIPERIF_CTRL_OFFSET(ip), \
627 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
628 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip))
629#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(ip) \
630 SET_UNIPERIF_REG(ip, \
631 UNIPERIF_CTRL_OFFSET(ip), \
632 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
633 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 0)
634#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(ip) \
635 SET_UNIPERIF_REG(ip, \
636 UNIPERIF_CTRL_OFFSET(ip), \
637 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
638 UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 1)
639
640/* ROUNDING */
641#define UNIPERIF_CTRL_ROUNDING_SHIFT(ip) 4
642#define UNIPERIF_CTRL_ROUNDING_MASK(ip) 0x1
643#define GET_UNIPERIF_CTRL_ROUNDING(ip) \
644 GET_UNIPERIF_REG(ip, \
645 UNIPERIF_CTRL_OFFSET(ip), \
646 UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
647 UNIPERIF_CTRL_ROUNDING_MASK(ip))
648#define SET_UNIPERIF_CTRL_ROUNDING_OFF(ip) \
649 SET_UNIPERIF_REG(ip, \
650 UNIPERIF_CTRL_OFFSET(ip), \
651 UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
652 UNIPERIF_CTRL_ROUNDING_MASK(ip), 0)
653#define SET_UNIPERIF_CTRL_ROUNDING_ON(ip) \
654 SET_UNIPERIF_REG(ip, \
655 UNIPERIF_CTRL_OFFSET(ip), \
656 UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
657 UNIPERIF_CTRL_ROUNDING_MASK(ip), 1)
658
659/* DIVIDER */
660#define UNIPERIF_CTRL_DIVIDER_SHIFT(ip) 5
661#define UNIPERIF_CTRL_DIVIDER_MASK(ip) 0xff
662#define GET_UNIPERIF_CTRL_DIVIDER(ip) \
663 GET_UNIPERIF_REG(ip, \
664 UNIPERIF_CTRL_OFFSET(ip), \
665 UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
666 UNIPERIF_CTRL_DIVIDER_MASK(ip))
667#define SET_UNIPERIF_CTRL_DIVIDER(ip, value) \
668 SET_UNIPERIF_REG(ip, \
669 UNIPERIF_CTRL_OFFSET(ip), \
670 UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
671 UNIPERIF_CTRL_DIVIDER_MASK(ip), value)
672
673/* BYTE_SWAP */
674#define UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip) \
675 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 13 : -1)
676#define UNIPERIF_CTRL_BYTE_SWP_MASK(ip) 0x1
677#define GET_UNIPERIF_CTRL_BYTE_SWP(ip) \
678 GET_UNIPERIF_REG(ip, \
679 UNIPERIF_CTRL_OFFSET(ip), \
680 UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
681 UNIPERIF_CTRL_BYTE_SWP_MASK(ip))
682#define SET_UNIPERIF_CTRL_BYTE_SWP_OFF(ip) \
683 SET_UNIPERIF_REG(ip, \
684 UNIPERIF_CTRL_OFFSET(ip), \
685 UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
686 UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 0)
687#define SET_UNIPERIF_CTRL_BYTE_SWP_ON(ip) \
688 SET_UNIPERIF_REG(ip, \
689 UNIPERIF_CTRL_OFFSET(ip), \
690 UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
691 UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 1)
692
693/* ZERO_STUFFING_HW_SW */
694#define UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip) \
695 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 14 : -1)
696#define UNIPERIF_CTRL_ZERO_STUFF_MASK(ip) 0x1
697#define GET_UNIPERIF_CTRL_ZERO_STUFF(ip) \
698 GET_UNIPERIF_REG(ip, \
699 UNIPERIF_CTRL_OFFSET(ip), \
700 UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
701 UNIPERIF_CTRL_ZERO_STUFF_MASK(ip))
702#define SET_UNIPERIF_CTRL_ZERO_STUFF_HW(ip) \
703 SET_UNIPERIF_REG(ip, \
704 UNIPERIF_CTRL_OFFSET(ip), \
705 UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
706 UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 1)
707#define SET_UNIPERIF_CTRL_ZERO_STUFF_SW(ip) \
708 SET_UNIPERIF_REG(ip, \
709 UNIPERIF_CTRL_OFFSET(ip), \
710 UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
711 UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 0)
712
713/* SPDIF_LAT */
714#define UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip) \
715 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
716#define UNIPERIF_CTRL_SPDIF_LAT_MASK(ip) 0x1
717#define GET_UNIPERIF_CTRL_SPDIF_LAT(ip) \
718 GET_UNIPERIF_REG(ip, \
719 UNIPERIF_CTRL_OFFSET(ip), \
720 UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
721 UNIPERIF_CTRL_SPDIF_LAT_MASK(ip))
722#define SET_UNIPERIF_CTRL_SPDIF_LAT_ON(ip) \
723 SET_UNIPERIF_REG(ip, \
724 UNIPERIF_CTRL_OFFSET(ip), \
725 UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
726 UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 1)
727#define SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(ip) \
728 SET_UNIPERIF_REG(ip, \
729 UNIPERIF_CTRL_OFFSET(ip), \
730 UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
731 UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 0)
732
733/* EN_SPDIF_FORMATTING */
734#define UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip) 17
735#define UNIPERIF_CTRL_SPDIF_FMT_MASK(ip) 0x1
736#define GET_UNIPERIF_CTRL_SPDIF_FMT(ip) \
737 GET_UNIPERIF_REG(ip, \
738 UNIPERIF_CTRL_OFFSET(ip), \
739 UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
740 UNIPERIF_CTRL_SPDIF_FMT_MASK(ip))
741#define SET_UNIPERIF_CTRL_SPDIF_FMT_ON(ip) \
742 SET_UNIPERIF_REG(ip, \
743 UNIPERIF_CTRL_OFFSET(ip), \
744 UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
745 UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 1)
746#define SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(ip) \
747 SET_UNIPERIF_REG(ip, \
748 UNIPERIF_CTRL_OFFSET(ip), \
749 UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
750 UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 0)
751
752/* READER_OUT_SELECT */
753#define UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip) \
754 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 18 : -1)
755#define UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip) 0x1
756#define GET_UNIPERIF_CTRL_READER_OUT_SEL(ip) \
757 GET_UNIPERIF_REG(ip, \
758 UNIPERIF_CTRL_OFFSET(ip), \
759 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
760 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip))
761#define SET_UNIPERIF_CTRL_READER_OUT_SEL_IN_MEM(ip) \
762 SET_UNIPERIF_REG(ip, \
763 UNIPERIF_CTRL_OFFSET(ip), \
764 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
765 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 0)
766#define SET_UNIPERIF_CTRL_READER_OUT_SEL_ON_I2S_LINE(ip) \
767 SET_UNIPERIF_REG(ip, \
768 UNIPERIF_CTRL_OFFSET(ip), \
769 UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
770 UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
771
772/* UNDERFLOW_REC_WINDOW */
773#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
774#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip) 0xff
775#define GET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip) \
776 GET_UNIPERIF_REG(ip, \
777 UNIPERIF_CTRL_OFFSET(ip), \
778 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
779 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip))
780#define SET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip, value) \
781 SET_UNIPERIF_REG(ip, \
782 UNIPERIF_CTRL_OFFSET(ip), \
783 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
784 UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip), value)
785
786/*
787 * UNIPERIF_I2S_FMT a.k.a UNIPERIF_FORMAT reg
788 */
789
790#define UNIPERIF_I2S_FMT_OFFSET(ip) 0x0048
791#define GET_UNIPERIF_I2S_FMT(ip) \
792 readl_relaxed(ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
793#define SET_UNIPERIF_I2S_FMT(ip, value) \
794 writel_relaxed(value, ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
795
796/* NBIT */
797#define UNIPERIF_I2S_FMT_NBIT_SHIFT(ip) 0
798#define UNIPERIF_I2S_FMT_NBIT_MASK(ip) 0x1
799#define GET_UNIPERIF_I2S_FMT_NBIT(ip) \
800 GET_UNIPERIF_REG(ip, \
801 UNIPERIF_I2S_FMT_OFFSET(ip), \
802 UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
803 UNIPERIF_I2S_FMT_NBIT_MASK(ip))
804#define SET_UNIPERIF_I2S_FMT_NBIT_32(ip) \
805 SET_UNIPERIF_REG(ip, \
806 UNIPERIF_I2S_FMT_OFFSET(ip), \
807 UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
808 UNIPERIF_I2S_FMT_NBIT_MASK(ip), 0)
809#define SET_UNIPERIF_I2S_FMT_NBIT_16(ip) \
810 SET_UNIPERIF_REG(ip, \
811 UNIPERIF_I2S_FMT_OFFSET(ip), \
812 UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
813 UNIPERIF_I2S_FMT_NBIT_MASK(ip), 1)
814
815/* DATA_SIZE */
816#define UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip) 1
817#define UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip) 0x7
818#define GET_UNIPERIF_I2S_FMT_DATA_SIZE(ip) \
819 GET_UNIPERIF_REG(ip, \
820 UNIPERIF_I2S_FMT_OFFSET(ip), \
821 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
822 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip))
823#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(ip) \
824 SET_UNIPERIF_REG(ip, \
825 UNIPERIF_I2S_FMT_OFFSET(ip), \
826 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
827 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 0)
828#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_18(ip) \
829 SET_UNIPERIF_REG(ip, \
830 UNIPERIF_I2S_FMT_OFFSET(ip), \
831 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
832 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 1)
833#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_20(ip) \
834 SET_UNIPERIF_REG(ip, \
835 UNIPERIF_I2S_FMT_OFFSET(ip), \
836 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
837 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 2)
838#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(ip) \
839 SET_UNIPERIF_REG(ip, \
840 UNIPERIF_I2S_FMT_OFFSET(ip), \
841 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
842 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 3)
843#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_28(ip) \
844 SET_UNIPERIF_REG(ip, \
845 UNIPERIF_I2S_FMT_OFFSET(ip), \
846 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
847 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 4)
848#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(ip) \
849 SET_UNIPERIF_REG(ip, \
850 UNIPERIF_I2S_FMT_OFFSET(ip), \
851 UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
852 UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 5)
853
854/* LR_POL */
855#define UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip) 4
856#define UNIPERIF_I2S_FMT_LR_POL_MASK(ip) 0x1
857#define VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) 0x0
858#define VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) 0x1
859#define GET_UNIPERIF_I2S_FMT_LR_POL(ip) \
860 GET_UNIPERIF_REG(ip, \
861 UNIPERIF_I2S_FMT_OFFSET(ip), \
862 UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
863 UNIPERIF_I2S_FMT_LR_POL_MASK(ip))
864#define SET_UNIPERIF_I2S_FMT_LR_POL(ip, value) \
865 SET_UNIPERIF_REG(ip, \
866 UNIPERIF_I2S_FMT_OFFSET(ip), \
867 UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
868 UNIPERIF_I2S_FMT_LR_POL_MASK(ip), value)
869#define SET_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) \
870 SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
871 VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip))
872#define SET_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) \
873 SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
874 VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip))
875
876/* SCLK_EDGE */
877#define UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip) 5
878#define UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip) 0x1
879#define GET_UNIPERIF_I2S_FMT_SCLK_EDGE(ip) \
880 GET_UNIPERIF_REG(ip, \
881 UNIPERIF_I2S_FMT_OFFSET(ip), \
882 UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
883 UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip))
884#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(ip) \
885 SET_UNIPERIF_REG(ip, \
886 UNIPERIF_I2S_FMT_OFFSET(ip), \
887 UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
888 UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 0)
889#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(ip) \
890 SET_UNIPERIF_REG(ip, \
891 UNIPERIF_I2S_FMT_OFFSET(ip), \
892 UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
893 UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 1)
894
895/* PADDING */
896#define UNIPERIF_I2S_FMT_PADDING_SHIFT(ip) 6
897#define UNIPERIF_I2S_FMT_PADDING_MASK(ip) 0x1
899#define VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) 0x0
900#define VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) 0x1
901#define GET_UNIPERIF_I2S_FMT_PADDING(ip) \
902 GET_UNIPERIF_REG(ip, \
903 UNIPERIF_I2S_FMT_OFFSET(ip), \
904 UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
905 UNIPERIF_I2S_FMT_PADDING_MASK(ip))
906#define SET_UNIPERIF_I2S_FMT_PADDING(ip, value) \
907 SET_UNIPERIF_REG(ip, \
908 UNIPERIF_I2S_FMT_OFFSET(ip), \
909 UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
910 UNIPERIF_I2S_FMT_PADDING_MASK(ip), value)
911#define SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) \
912 SET_UNIPERIF_I2S_FMT_PADDING(ip, \
913 VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip))
914#define SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) \
915 SET_UNIPERIF_I2S_FMT_PADDING(ip, \
916 VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip))
917
918/* ALIGN */
919#define UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip) 7
920#define UNIPERIF_I2S_FMT_ALIGN_MASK(ip) 0x1
921#define GET_UNIPERIF_I2S_FMT_ALIGN(ip) \
922 GET_UNIPERIF_REG(ip, \
923 UNIPERIF_I2S_FMT_OFFSET(ip), \
924 UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
925 UNIPERIF_I2S_FMT_ALIGN_MASK(ip))
926#define SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(ip) \
927 SET_UNIPERIF_REG(ip, \
928 UNIPERIF_I2S_FMT_OFFSET(ip), \
929 UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
930 UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 0)
931#define SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(ip) \
932 SET_UNIPERIF_REG(ip, \
933 UNIPERIF_I2S_FMT_OFFSET(ip), \
934 UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
935 UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 1)
936
937/* ORDER */
938#define UNIPERIF_I2S_FMT_ORDER_SHIFT(ip) 8
939#define UNIPERIF_I2S_FMT_ORDER_MASK(ip) 0x1
940#define GET_UNIPERIF_I2S_FMT_ORDER(ip) \
941 GET_UNIPERIF_REG(ip, \
942 UNIPERIF_I2S_FMT_OFFSET(ip), \
943 UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
944 UNIPERIF_I2S_FMT_ORDER_MASK(ip))
945#define SET_UNIPERIF_I2S_FMT_ORDER_LSB(ip) \
946 SET_UNIPERIF_REG(ip, \
947 UNIPERIF_I2S_FMT_OFFSET(ip), \
948 UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
949 UNIPERIF_I2S_FMT_ORDER_MASK(ip), 0)
950#define SET_UNIPERIF_I2S_FMT_ORDER_MSB(ip) \
951 SET_UNIPERIF_REG(ip, \
952 UNIPERIF_I2S_FMT_OFFSET(ip), \
953 UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
954 UNIPERIF_I2S_FMT_ORDER_MASK(ip), 1)
955
956/* NUM_CH */
957#define UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip) 9
958#define UNIPERIF_I2S_FMT_NUM_CH_MASK(ip) 0x7
959#define GET_UNIPERIF_I2S_FMT_NUM_CH(ip) \
960 GET_UNIPERIF_REG(ip, \
961 UNIPERIF_I2S_FMT_OFFSET(ip), \
962 UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
963 UNIPERIF_I2S_FMT_NUM_CH_MASK(ip))
964#define SET_UNIPERIF_I2S_FMT_NUM_CH(ip, value) \
965 SET_UNIPERIF_REG(ip, \
966 UNIPERIF_I2S_FMT_OFFSET(ip), \
967 UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
968 UNIPERIF_I2S_FMT_NUM_CH_MASK(ip), value)
969
970/* NO_OF_SAMPLES_TO_READ */
971#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip) 12
972#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip) 0xfffff
973#define GET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip) \
974 GET_UNIPERIF_REG(ip, \
975 UNIPERIF_I2S_FMT_OFFSET(ip), \
976 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
977 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip))
978#define SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip, value) \
979 SET_UNIPERIF_REG(ip, \
980 UNIPERIF_I2S_FMT_OFFSET(ip), \
981 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
982 UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip), value)
983
984/*
985 * UNIPERIF_BIT_CONTROL reg
986 */
987
988#define UNIPERIF_BIT_CONTROL_OFFSET(ip) \
989 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0x004c)
990#define GET_UNIPERIF_BIT_CONTROL(ip) \
991 readl_relaxed(ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
992#define SET_UNIPERIF_BIT_CONTROL(ip, value) \
993 writel_relaxed(value, ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
994
995/* CLR_UNDERFLOW_DURATION */
996#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip) 0
997#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip) 0x1
998#define GET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
999 GET_UNIPERIF_REG(ip, \
1000 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1001 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
1002 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip))
1003#define SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
1004 SET_UNIPERIF_REG(ip, \
1005 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1006 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
1007 UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip), 1)
1008
1009/* CHL_STS_UPDATE */
1010#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip) 1
1011#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip) 0x1
1012#define GET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
1013 GET_UNIPERIF_REG(ip, \
1014 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1015 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
1016 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip))
1017#define SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
1018 SET_UNIPERIF_BIT_REG(ip, \
1019 UNIPERIF_BIT_CONTROL_OFFSET(ip), \
1020 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
1021 UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip), 1)
1022
1023/*
1024 * UNIPERIF_STATUS_1 reg
1025 */
1026
1027#define UNIPERIF_STATUS_1_OFFSET(ip) 0x0050
1028#define GET_UNIPERIF_STATUS_1(ip) \
1029 readl_relaxed(ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
1030#define SET_UNIPERIF_STATUS_1(ip, value) \
1031 writel_relaxed(value, ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
1032
1033/* UNDERFLOW_DURATION */
1034#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip) \
1035 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
1036#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip) 0xff
1037#define GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip) \
1038 GET_UNIPERIF_REG(ip, \
1039 UNIPERIF_STATUS_1_OFFSET(ip), \
1040 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
1041 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip))
1042#define SET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip, value) \
1043 SET_UNIPERIF_REG(ip, \
1044 UNIPERIF_STATUS_1_OFFSET(ip), \
1045 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
1046 UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
1047
1059/*
1060 * AUD_UNIPERIF_USER_VALIDITY reg
1061 */
1062
1063#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
1064#define GET_UNIPERIF_USER_VALIDITY(ip) \
1065 readl_relaxed(ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
1066#define SET_UNIPERIF_USER_VALIDITY(ip, value) \
1067 writel_relaxed(value, ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
1068
1069/* VALIDITY_LEFT_AND_RIGHT */
1070#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip) 0
1071#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip) 0x3
1072#define GET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip) \
1073 GET_UNIPERIF_REG(ip, \
1074 UNIPERIF_USER_VALIDITY_OFFSET(ip), \
1075 UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
1076 UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip))
1077#define SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip, value) \
1078 SET_UNIPERIF_REG(ip, \
1079 UNIPERIF_USER_VALIDITY_OFFSET(ip), \
1080 UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
1081 UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip), \
1082 value ? 0x3 : 0)
1083
1084/*
1085 * UNIPERIF_DBG_STANDBY_LEFT_SP reg
1086 */
1087#define UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip) 0x0150
1088#define UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip) \
1089 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
1090#define UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip) \
1091 ((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 0xFFFFFF)
1092#define GET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip) \
1093 GET_UNIPERIF_REG(ip, \
1094 UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
1095 UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
1096 UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip))
1097#define SET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip, value) \
1098 SET_UNIPERIF_REG(ip, \
1099 UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
1100 UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
1101 UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
1102
1103/*
1104 * uniperipheral IP capabilities
1105 */
1106
1107#define UNIPERIF_FIFO_SIZE 70 /* FIFO is 70 cells deep */
1108#define UNIPERIF_FIFO_FRAMES 4 /* FDMA trigger limit in frames */
1109
1110/*
1111 * Uniperipheral IP revisions
1112 */
1113enum uniperif_version {
1114 SND_ST_UNIPERIF_VERSION_UNKNOWN,
1115 /* SASG1 (Orly), Newman */
1116 SND_ST_UNIPERIF_VERSION_C6AUD0_UNI_1_0,
1117 /* SASC1, SASG2 (Orly2) */
1118 SND_ST_UNIPERIF_VERSION_UNI_PLR_1_0,
1119 /* SASC1, SASG2 (Orly2), TELSS, Cannes */
1120 SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
1121 /* TELSS (SASC1) */
1122 SND_ST_UNIPERIF_VERSION_TDM_PLR_1_0,
1123 /* Cannes/Monaco */
1124 SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
1125};
1126
1127enum uniperif_type {
1128 SND_ST_UNIPERIF_PLAYER_TYPE_NONE,
1129 SND_ST_UNIPERIF_PLAYER_TYPE_HDMI,
1130 SND_ST_UNIPERIF_PLAYER_TYPE_PCM,
1131 SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF
1132};
1133
1134enum uniperif_state {
1135 UNIPERIF_STATE_STOPPED,
1136 UNIPERIF_STATE_STARTED,
1137 UNIPERIF_STATE_STANDBY,
1138 UNIPERIF_STATE_UNDERFLOW,
1139 UNIPERIF_STATE_OVERFLOW = UNIPERIF_STATE_UNDERFLOW,
1140 UNIPERIF_STATE_XRUN
1141};
1142
1143enum uniperif_iec958_encoding_mode {
1144 UNIPERIF_IEC958_ENCODING_MODE_PCM,
1145 UNIPERIF_IEC958_ENCODING_MODE_ENCODED
1146};
1147
1148struct uniperif_info {
1149 int id; /* instance value of the uniperipheral IP */
1150 enum uniperif_type player_type;
1151 int underflow_enabled; /* Underflow recovery mode */
1152};
1153
1154struct uniperif_iec958_settings {
1155 enum uniperif_iec958_encoding_mode encoding_mode;
1156 struct snd_aes_iec958 iec958;
1157};
1158
1159struct uniperif {
1160 /* System information */
1161 struct uniperif_info *info;
1162 struct device *dev;
1163 int ver; /* IP version, used by register access macros */
1164 struct regmap_field *clk_sel;
1165
1166 /* capabilities */
1167 const struct snd_pcm_hardware *hw;
1168
1169 /* Resources */
1170 struct resource *mem_region;
1171 void __iomem *base;
1172 unsigned long fifo_phys_address;
1173 int irq;
1174
1175 /* Clocks */
1176 struct clk *clk;
1177 int mclk;
1178 int clk_adj;
1179
1180 /* Runtime data */
1181 enum uniperif_state state;
1182
1183 struct snd_pcm_substream *substream;
1184
1185 /* Specific to IEC958 player */
1186 struct uniperif_iec958_settings stream_settings;
1187 struct mutex ctrl_lock; /* protects resources updated by both stream and controls */
1188
1189 /* ALSA controls */
1190 struct snd_kcontrol_new *snd_ctrls;
1191 int num_ctrls;
1192
1193 /* dai properties */
1194 unsigned int daifmt;
1195
1196 /* DAI callbacks */
1197 const struct snd_soc_dai_ops *dai_ops;
1198};
1199
1200struct sti_uniperiph_dai {
1201 int stream;
1202 struct uniperif *uni;
1203 struct snd_dmaengine_dai_dma_data dma_data;
1204};
1205
1206struct sti_uniperiph_data {
1207 struct platform_device *pdev;
1208 struct snd_soc_dai_driver *dai;
1209 struct sti_uniperiph_dai dai_data;
1210};
1211
1212/* uniperipheral player */
1213int uni_player_init(struct platform_device *pdev,
1214 struct uniperif *uni_player);
1215int uni_player_resume(struct uniperif *player);
1216
1217/* uniperiph reader */
1218int uni_reader_init(struct platform_device *pdev,
1219 struct uniperif *uni_reader);
1220
1221/* common */
1222int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai,
1223 unsigned int fmt);
1224
1225int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
1226 struct snd_pcm_hw_params *params,
1227 struct snd_soc_dai *dai);
1228
1229#endif
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
new file mode 100644
index 000000000000..f6eefe1b8f8f
--- /dev/null
+++ b/sound/soc/sti/uniperif_player.c
@@ -0,0 +1,1110 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2015
3 * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
4 * for STMicroelectronics.
5 * License terms: GNU General Public License (GPL), version 2
6 */
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/io.h>
11#include <linux/mfd/syscon.h>
12
13#include <sound/asoundef.h>
14#include <sound/soc.h>
15
16#include "uniperif.h"
17
18/*
19 * Some hardware-related definitions
20 */
21
22/* sys config registers definitions */
23#define SYS_CFG_AUDIO_GLUE 0xA4
24#define SYS_CFG_AUDI0_GLUE_PCM_CLKX 8
25
26/*
27 * Driver specific types.
28 */
29#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
30 ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
31#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
32 ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
33#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
34 ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
35#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
36 (UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
37 UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
38
39#define UNIPERIF_PLAYER_CLK_ADJ_MIN -999999
40#define UNIPERIF_PLAYER_CLK_ADJ_MAX 1000000
41
42/*
43 * Note: snd_pcm_hardware is linked to DMA controller but is declared here to
44 * integrate DAI_CPU capabilities in terms of rates and supported channels
45 */
46static const struct snd_pcm_hardware uni_player_pcm_hw = {
47 .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
48 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
49 SNDRV_PCM_INFO_MMAP_VALID,
50 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
51
52 .rates = SNDRV_PCM_RATE_CONTINUOUS,
53 .rate_min = 8000,
54 .rate_max = 192000,
55
56 .channels_min = 2,
57 .channels_max = 8,
58
59 .periods_min = 2,
60 .periods_max = 48,
61
62 .period_bytes_min = 128,
63 .period_bytes_max = 64 * PAGE_SIZE,
64 .buffer_bytes_max = 256 * PAGE_SIZE
65};
66
67static inline int reset_player(struct uniperif *player)
68{
69 int count = 10;
70
71 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
72 while (GET_UNIPERIF_SOFT_RST_SOFT_RST(player) && count) {
73 udelay(5);
74 count--;
75 }
76 }
77
78 if (!count) {
79 dev_err(player->dev, "Failed to reset uniperif");
80 return -EIO;
81 }
82
83 return 0;
84}
85
86/*
87 * uni_player_irq_handler
88 * In case of an error the audio stream is stopped; the stop action is
89 * protected by the PCM stream lock to avoid racing with the trigger callback.
90 */
91static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
92{
93 irqreturn_t ret = IRQ_NONE;
94 struct uniperif *player = dev_id;
95 unsigned int status;
96 unsigned int tmp;
97
98 if (player->state == UNIPERIF_STATE_STOPPED) {
99 /* Unexpected IRQ: do nothing */
100 return IRQ_NONE;
101 }
102
103 /* Get interrupt status & clear them immediately */
104 status = GET_UNIPERIF_ITS(player);
105 SET_UNIPERIF_ITS_BCLR(player, status);
106
107 /* Check for fifo error (underrun) */
108 if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
109 dev_err(player->dev, "FIFO underflow error detected");
110
111 /* Interrupt is informational only when underflow recovery is enabled */
112 if (player->info->underflow_enabled) {
113 /* Update state to underflow */
114 player->state = UNIPERIF_STATE_UNDERFLOW;
115
116 } else {
117 /* Disable the interrupt so it doesn't continually fire */
118 SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
119
120 /* Stop the player */
121 snd_pcm_stream_lock(player->substream);
122 snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
123 snd_pcm_stream_unlock(player->substream);
124 }
125
126 ret = IRQ_HANDLED;
127 }
128
129 /* Check for dma error (overrun) */
130 if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
131 dev_err(player->dev, "DMA error detected");
132
133 /* Disable the interrupt so it doesn't continually fire */
134 SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
135
136 /* Stop the player */
137 snd_pcm_stream_lock(player->substream);
138 snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
139 snd_pcm_stream_unlock(player->substream);
140
141 ret = IRQ_HANDLED;
142 }
143
144 /* Check for underflow recovery done */
145 if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
146 if (!player->info->underflow_enabled) {
147 dev_err(player->dev, "unexpected underflow recovery");
148 return IRQ_NONE;
149 }
150 /* Read the underflow recovery duration */
151 tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
152
153 /* Clear the underflow recovery duration */
154 SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
155
156 /* Update state to started */
157 player->state = UNIPERIF_STATE_STARTED;
158
159 ret = IRQ_HANDLED;
160 }
161
162 /* Check if underflow recovery failed */
163 if (unlikely(status &
164 UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
165 dev_err(player->dev, "Underflow recovery failed");
166
167 /* Stop the player */
168 snd_pcm_stream_lock(player->substream);
169 snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
170 snd_pcm_stream_unlock(player->substream);
171
172 ret = IRQ_HANDLED;
173 }
174
175 return ret;
176}
177
178static int uni_player_clk_set_rate(struct uniperif *player, unsigned long rate)
179{
180 int rate_adjusted, rate_achieved, delta, ret;
181 int adjustment = player->clk_adj;
182
183 /*
184 * a
185 * F = f + --------- * f = f + d
186 * 1000000
187 *
188 * a
189 * d = --------- * f
190 * 1000000
191 *
192 * where:
193 * f - nominal rate
194 * a - adjustment in ppm (parts per million)
195 * F - rate to be set in synthesizer
196 * d - delta (difference) between f and F
197 */
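 /*
 * Worked example (hypothetical values): f = 48000 Hz, a = +100 ppm gives
 * d = (48000 * 100 + 500000) / 1000000 = 5, so F = 48005 Hz is requested
 * from the clock.
 */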
198 if (adjustment < 0) {
199 /* div64_64 operates on unsigned values... */
200 delta = -1;
201 adjustment = -adjustment;
202 } else {
203 delta = 1;
204 }
205 /* 500000 ppm is 0.5; adding it rounds the result to the nearest integer */
206 delta *= (int)div64_u64((uint64_t)rate *
207 (uint64_t)adjustment + 500000, 1000000);
208 rate_adjusted = rate + delta;
209
210 /* Adjusted rate should never be == 0 */
211 if (!rate_adjusted)
212 return -EINVAL;
213
214 ret = clk_set_rate(player->clk, rate_adjusted);
215 if (ret < 0)
216 return ret;
217
218 rate_achieved = clk_get_rate(player->clk);
219 if (!rate_achieved)
220 /* A return value of 0 means the clock or its parent is not valid */
221 return -EINVAL;
222
223 /*
224 * Using ALSA's adjustment control, we can modify the rate to be up
225 * to twice as much as requested, but no more
226 */
227 delta = rate_achieved - rate;
228 if (delta < 0) {
229 /* div64_64 operates on unsigned values... */
230 delta = -delta;
231 adjustment = -1;
232 } else {
233 adjustment = 1;
234 }
235 /* rate/2 is added to round the result to the nearest integer */
236 adjustment *= (int)div64_u64((uint64_t)delta * 1000000 + rate / 2,
237 rate);
238 player->clk_adj = adjustment;
239 return 0;
240}
241
242static void uni_player_set_channel_status(struct uniperif *player,
243 struct snd_pcm_runtime *runtime)
244{
245 int n;
246 unsigned int status;
247
248 /*
249 * Some AVRs and TVs require the channel status to contain a correct
250 * sampling frequency. If no sample rate is already specified, then
251 * set one.
252 */
253 mutex_lock(&player->ctrl_lock);
254 if (runtime && (player->stream_settings.iec958.status[3]
255 == IEC958_AES3_CON_FS_NOTID)) {
256 switch (runtime->rate) {
257 case 22050:
258 player->stream_settings.iec958.status[3] =
259 IEC958_AES3_CON_FS_22050;
260 break;
261 case 44100:
262 player->stream_settings.iec958.status[3] =
263 IEC958_AES3_CON_FS_44100;
264 break;
265 case 88200:
266 player->stream_settings.iec958.status[3] =
267 IEC958_AES3_CON_FS_88200;
268 break;
269 case 176400:
270 player->stream_settings.iec958.status[3] =
271 IEC958_AES3_CON_FS_176400;
272 break;
273 case 24000:
274 player->stream_settings.iec958.status[3] =
275 IEC958_AES3_CON_FS_24000;
276 break;
277 case 48000:
278 player->stream_settings.iec958.status[3] =
279 IEC958_AES3_CON_FS_48000;
280 break;
281 case 96000:
282 player->stream_settings.iec958.status[3] =
283 IEC958_AES3_CON_FS_96000;
284 break;
285 case 192000:
286 player->stream_settings.iec958.status[3] =
287 IEC958_AES3_CON_FS_192000;
288 break;
289 case 32000:
290 player->stream_settings.iec958.status[3] =
291 IEC958_AES3_CON_FS_32000;
292 break;
293 default:
294 /* Mark as sampling frequency not indicated */
295 player->stream_settings.iec958.status[3] =
296 IEC958_AES3_CON_FS_NOTID;
297 break;
298 }
299 }
300
301 /* Audio mode:
302 * Use audio mode status to select PCM or encoded mode
303 */
304 if (player->stream_settings.iec958.status[0] & IEC958_AES0_NONAUDIO)
305 player->stream_settings.encoding_mode =
306 UNIPERIF_IEC958_ENCODING_MODE_ENCODED;
307 else
308 player->stream_settings.encoding_mode =
309 UNIPERIF_IEC958_ENCODING_MODE_PCM;
310
311 if (player->stream_settings.encoding_mode ==
312 UNIPERIF_IEC958_ENCODING_MODE_PCM)
313 /* Clear user validity bits */
314 SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
315 else
316 /* Set user validity bits */
317 SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 1);
318
319 /* Program the new channel status */
320 for (n = 0; n < 6; ++n) {
321 status =
322 player->stream_settings.iec958.status[0 + (n * 4)] & 0xf;
323 status |=
324 player->stream_settings.iec958.status[1 + (n * 4)] << 8;
325 status |=
326 player->stream_settings.iec958.status[2 + (n * 4)] << 16;
327 status |=
328 player->stream_settings.iec958.status[3 + (n * 4)] << 24;
329 SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
330 }
331 mutex_unlock(&player->ctrl_lock);
332
333 /* Update the channel status */
334 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
335 SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
336 else
337 SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
338}
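
For reference, the loop at the end of uni_player_set_channel_status() above packs the 24 IEC958 channel-status bytes into six 32-bit registers, least significant byte first (the driver additionally masks byte 0 of each word with 0xf). A stand-alone sketch of that packing, with purely illustrative byte values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 24 channel-status bytes; only the first four are non-zero here,
		 * and the values are illustrative, not taken from the driver. */
		uint8_t status[24] = { 0x04, 0x00, 0x00, 0x02 };
		uint32_t reg[6];
		int n;

		for (n = 0; n < 6; n++)
			reg[n] = ((uint32_t)status[4 * n] & 0xf) |
				 ((uint32_t)status[4 * n + 1] << 8) |
				 ((uint32_t)status[4 * n + 2] << 16) |
				 ((uint32_t)status[4 * n + 3] << 24);

		/* Prints: CHANNEL_STA_REG0 = 0x02000004 */
		printf("CHANNEL_STA_REG0 = 0x%08x\n", reg[0]);
		return 0;
	}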
339
340static int uni_player_prepare_iec958(struct uniperif *player,
341 struct snd_pcm_runtime *runtime)
342{
343 int clk_div;
344
345 clk_div = player->mclk / runtime->rate;
346
347 /* Oversampling must be a multiple of 128 as iec958 subframes are 32 bits */
348 if ((clk_div % 128) || (clk_div <= 0)) {
349 dev_err(player->dev, "%s: invalid clk_div %d",
350 __func__, clk_div);
351 return -EINVAL;
352 }
353
354 switch (runtime->format) {
355 case SNDRV_PCM_FORMAT_S16_LE:
356 /* 16/16 memory format */
357 SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
358 /* 32 bits per sub-frame on the wire */
359 SET_UNIPERIF_I2S_FMT_NBIT_32(player);
360 /* Set 16-bit sample precision */
361 SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
362 break;
363 case SNDRV_PCM_FORMAT_S32_LE:
364 /* 16/0 memory format */
365 SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
366 /* 32-bits per sub-frame */
367 SET_UNIPERIF_I2S_FMT_NBIT_32(player);
368 /* Set 24-bit sample precision */
369 SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
370 break;
371 default:
372 dev_err(player->dev, "format not supported");
373 return -EINVAL;
374 }
375
376 /* Set parity to be calculated by the hardware */
377 SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(player);
378
379 /* Set channel status bits to be inserted by the hardware */
380 SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(player);
381
382 /* Set user data bits to be inserted by the hardware */
383 SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(player);
384
385 /* Set validity bits to be inserted by the hardware */
386 SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(player);
387
388 /* Set full software control to disabled */
389 SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(player);
390
391 SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
392
393 /* Update the channel status */
394 uni_player_set_channel_status(player, runtime);
395
396 /* Clear the user validity user bits */
397 SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
398
399 /* Disable one-bit audio mode */
400 SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
401
402 /* Enable consecutive frames repetition of Z preamble (not for HBRA) */
403 SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(player);
404
405 /* Select SUBF1/SUBF0 subframe order (left/right channels swapped) */
406 SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(player);
407
408 /* Set data output as MSB first */
409 SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
410
411 if (player->stream_settings.encoding_mode ==
412 UNIPERIF_IEC958_ENCODING_MODE_ENCODED)
413 SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(player);
414 else
415 SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(player);
416
417 SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
418
419 /* Set rounding to off */
420 SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
421
422 /* Set clock divisor */
423 SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / 128);
424
425 /* Set the spdif latency to not wait before starting player */
426 SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
427
428 /*
429 * On older IPs, keep iec958 formatting off here; it is enabled in
430 * uni_player_start() at the same time as the operation mode is set,
431 * to work around a silicon issue. Newer IPs can enable it directly.
432 */
433 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
434 SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
435 else
436 SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
437
438 return 0;
439}
440
441static int uni_player_prepare_pcm(struct uniperif *player,
442 struct snd_pcm_runtime *runtime)
443{
444 int output_frame_size, slot_width, clk_div;
445
446 /* Force slot width to 32 in I2S mode (HW constraint) */
447 if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
448 SND_SOC_DAIFMT_I2S) {
449 slot_width = 32;
450 } else {
451 switch (runtime->format) {
452 case SNDRV_PCM_FORMAT_S16_LE:
453 slot_width = 16;
454 break;
455 default:
456 slot_width = 32;
457 break;
458 }
459 }
460 output_frame_size = slot_width * runtime->channels;
461
462 clk_div = player->mclk / runtime->rate;
463 /*
464 * For a 32-bit subframe clk_div must be a multiple of 128;
465 * for a 16-bit subframe it must be a multiple of 64.
466 */
467 if ((slot_width == 32) && (clk_div % 128)) {
468 dev_err(player->dev, "%s: invalid clk_div", __func__);
469 return -EINVAL;
470 }
471
472 if ((slot_width == 16) && (clk_div % 64)) {
473 dev_err(player->dev, "%s: invalid clk_div", __func__);
474 return -EINVAL;
475 }
476
477 /*
478 * Number of bits per subframe (which is one channel sample)
479 * on output - Transfer 16 or 32 bits from FIFO
480 */
481 switch (slot_width) {
482 case 32:
483 SET_UNIPERIF_I2S_FMT_NBIT_32(player);
484 SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
485 break;
486 case 16:
487 SET_UNIPERIF_I2S_FMT_NBIT_16(player);
488 SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
489 break;
490 default:
491 dev_err(player->dev, "subframe format not supported");
492 return -EINVAL;
493 }
494
495 /* Configure data memory format */
496 switch (runtime->format) {
497 case SNDRV_PCM_FORMAT_S16_LE:
498 /* One data word contains two samples */
499 SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
500 break;
501
502 case SNDRV_PCM_FORMAT_S32_LE:
503 /*
504 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
505 * on the left (MSB) then zeros (if less than 32 bits)"... ;-)
506 */
507 SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
508 break;
509
510 default:
511 dev_err(player->dev, "format not supported");
512 return -EINVAL;
513 }
514
515 /* Set rounding to off */
516 SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
517
518 /* Set clock divisor */
519 SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / (2 * output_frame_size));
520
521 /* Number of channels must be even */
522 if ((runtime->channels % 2) || (runtime->channels < 2) ||
523 (runtime->channels > 10)) {
524 dev_err(player->dev, "%s: invalid nb of channels", __func__);
525 return -EINVAL;
526 }
527
528 SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
529
530 /* Set 1-bit audio format to disabled */
531 SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
532
533 SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
534 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
535
536 /* No iec958 formatting as outputting to DAC */
537 SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
538
539 return 0;
540}
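
To make the divider maths in uni_player_prepare_pcm() concrete, here is a stand-alone sketch with illustrative numbers only: an assumed mclk of 12.288 MHz driving 48 kHz stereo S32_LE in I2S mode.

	#include <stdio.h>

	int main(void)
	{
		unsigned long mclk = 12288000;	/* assumed oversampling clock */
		unsigned int rate = 48000;	/* runtime->rate */
		unsigned int channels = 2;	/* stereo */
		unsigned int slot_width = 32;	/* forced to 32 in I2S mode */

		unsigned int clk_div = mclk / rate;			/* 256, multiple of 128 */
		unsigned int frame_size = slot_width * channels;	/* 64-bit output frame */
		unsigned int divider = clk_div / (2 * frame_size);	/* 2 */

		/* Prints: clk_div=256 (clk_div % 128 = 0), divider=2 */
		printf("clk_div=%u (clk_div %% 128 = %u), divider=%u\n",
		       clk_div, clk_div % 128, divider);
		return 0;
	}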
541
542/*
543 * ALSA uniperipheral iec958 controls
544 */
545static int uni_player_ctl_iec958_info(struct snd_kcontrol *kcontrol,
546 struct snd_ctl_elem_info *uinfo)
547{
548 uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
549 uinfo->count = 1;
550
551 return 0;
552}
553
554static int uni_player_ctl_iec958_get(struct snd_kcontrol *kcontrol,
555 struct snd_ctl_elem_value *ucontrol)
556{
557 struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
558 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
559 struct uniperif *player = priv->dai_data.uni;
560 struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
561
562 mutex_lock(&player->ctrl_lock);
563 ucontrol->value.iec958.status[0] = iec958->status[0];
564 ucontrol->value.iec958.status[1] = iec958->status[1];
565 ucontrol->value.iec958.status[2] = iec958->status[2];
566 ucontrol->value.iec958.status[3] = iec958->status[3];
567 mutex_unlock(&player->ctrl_lock);
568 return 0;
569}
570
571static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
572 struct snd_ctl_elem_value *ucontrol)
573{
574 struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
575 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
576 struct uniperif *player = priv->dai_data.uni;
577 struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
578
579 mutex_lock(&player->ctrl_lock);
580 iec958->status[0] = ucontrol->value.iec958.status[0];
581 iec958->status[1] = ucontrol->value.iec958.status[1];
582 iec958->status[2] = ucontrol->value.iec958.status[2];
583 iec958->status[3] = ucontrol->value.iec958.status[3];
584 mutex_unlock(&player->ctrl_lock);
585
586 uni_player_set_channel_status(player, NULL);
587
588 return 0;
589}
590
591static struct snd_kcontrol_new uni_player_iec958_ctl = {
592 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
593 .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
594 .info = uni_player_ctl_iec958_info,
595 .get = uni_player_ctl_iec958_get,
596 .put = uni_player_ctl_iec958_put,
597};
598
599/*
600 * uniperif rate adjustment control
601 */
602static int snd_sti_clk_adjustment_info(struct snd_kcontrol *kcontrol,
603 struct snd_ctl_elem_info *uinfo)
604{
605 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
606 uinfo->count = 1;
607 uinfo->value.integer.min = UNIPERIF_PLAYER_CLK_ADJ_MIN;
608 uinfo->value.integer.max = UNIPERIF_PLAYER_CLK_ADJ_MAX;
609 uinfo->value.integer.step = 1;
610
611 return 0;
612}
613
614static int snd_sti_clk_adjustment_get(struct snd_kcontrol *kcontrol,
615 struct snd_ctl_elem_value *ucontrol)
616{
617 struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
618 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
619 struct uniperif *player = priv->dai_data.uni;
620
621 mutex_lock(&player->ctrl_lock);
622 ucontrol->value.integer.value[0] = player->clk_adj;
623 mutex_unlock(&player->ctrl_lock);
624
625 return 0;
626}
627
628static int snd_sti_clk_adjustment_put(struct snd_kcontrol *kcontrol,
629 struct snd_ctl_elem_value *ucontrol)
630{
631 struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
632 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
633 struct uniperif *player = priv->dai_data.uni;
634 int ret = 0;
635
636 if ((ucontrol->value.integer.value[0] < UNIPERIF_PLAYER_CLK_ADJ_MIN) ||
637 (ucontrol->value.integer.value[0] > UNIPERIF_PLAYER_CLK_ADJ_MAX))
638 return -EINVAL;
639
640 mutex_lock(&player->ctrl_lock);
641 player->clk_adj = ucontrol->value.integer.value[0];
642
643 if (player->mclk)
644 ret = uni_player_clk_set_rate(player, player->mclk);
645 mutex_unlock(&player->ctrl_lock);
646
647 return ret;
648}
649
650static struct snd_kcontrol_new uni_player_clk_adj_ctl = {
651 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
652 .name = "PCM Playback Oversampling Freq. Adjustment",
653 .info = snd_sti_clk_adjustment_info,
654 .get = snd_sti_clk_adjustment_get,
655 .put = snd_sti_clk_adjustment_put,
656};
657
658static struct snd_kcontrol_new *snd_sti_pcm_ctl[] = {
659 &uni_player_clk_adj_ctl,
660};
661
662static struct snd_kcontrol_new *snd_sti_iec_ctl[] = {
663 &uni_player_iec958_ctl,
664 &uni_player_clk_adj_ctl,
665};
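
User space reaches the rate adjustment through the "PCM Playback Oversampling Freq. Adjustment" control registered above. A minimal alsa-lib sketch, assuming card "hw:0" and that addressing the control by interface and name is sufficient (card name and error handling are illustrative, not part of this patch):

	#include <alsa/asoundlib.h>

	/* Set the oversampling frequency adjustment, in ppm */
	static int set_clk_adjustment(const char *card, long ppm)
	{
		snd_ctl_t *ctl;
		snd_ctl_elem_value_t *val;
		int err;

		err = snd_ctl_open(&ctl, card, 0);
		if (err < 0)
			return err;

		snd_ctl_elem_value_alloca(&val);
		snd_ctl_elem_value_set_interface(val, SND_CTL_ELEM_IFACE_PCM);
		snd_ctl_elem_value_set_name(val,
				"PCM Playback Oversampling Freq. Adjustment");
		snd_ctl_elem_value_set_integer(val, 0, ppm);

		err = snd_ctl_elem_write(ctl, val);
		snd_ctl_close(ctl);
		return err;
	}

	/* e.g. set_clk_adjustment("hw:0", -100); */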
666
667static int uni_player_startup(struct snd_pcm_substream *substream,
668 struct snd_soc_dai *dai)
669{
670 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
671 struct uniperif *player = priv->dai_data.uni;
672
673 player->clk_adj = 0;
674
675 return 0;
676}
677
678static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
679 unsigned int freq, int dir)
680{
681 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
682 struct uniperif *player = priv->dai_data.uni;
683 int ret;
684
685 if (dir == SND_SOC_CLOCK_IN)
686 return 0;
687
688 if (clk_id != 0)
689 return -EINVAL;
690
691 mutex_lock(&player->ctrl_lock);
692 ret = uni_player_clk_set_rate(player, freq);
693 if (!ret)
694 player->mclk = freq;
695 mutex_unlock(&player->ctrl_lock);
696
697 return ret;
698}
699
700static int uni_player_prepare(struct snd_pcm_substream *substream,
701 struct snd_soc_dai *dai)
702{
703 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
704 struct uniperif *player = priv->dai_data.uni;
705 struct snd_pcm_runtime *runtime = substream->runtime;
706 int transfer_size, trigger_limit;
707 int ret;
708
709 /* The player should be stopped */
710 if (player->state != UNIPERIF_STATE_STOPPED) {
711 dev_err(player->dev, "%s: invalid player state %d", __func__,
712 player->state);
713 return -EINVAL;
714 }
715
716 /* Calculate transfer size (in fifo cells and bytes) for frame count */
717 transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
718
719 /* Calculate number of empty cells available before asserting DREQ */
720 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
721 trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
722 } else {
723 /*
724 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
725 * FDMA_TRIGGER_LIMIT also controls when the state switches
726 * from OFF or STANDBY to AUDIO DATA.
727 */
728 trigger_limit = transfer_size;
729 }
730
731 /* Trigger limit must be an even number */
732 if ((trigger_limit % 2) || (trigger_limit != 1 && transfer_size % 2) ||
733 (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
734 dev_err(player->dev, "invalid trigger limit %d", trigger_limit);
735 return -EINVAL;
736 }
737
738 SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
739
740 /* Uniperipheral setup depends on player type */
741 switch (player->info->player_type) {
742 case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI:
743 ret = uni_player_prepare_iec958(player, runtime);
744 break;
745 case SND_ST_UNIPERIF_PLAYER_TYPE_PCM:
746 ret = uni_player_prepare_pcm(player, runtime);
747 break;
748 case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF:
749 ret = uni_player_prepare_iec958(player, runtime);
750 break;
751 default:
752 dev_err(player->dev, "invalid player type");
753 return -EINVAL;
754 }
755
756 if (ret)
757 return ret;
758
759 switch (player->daifmt & SND_SOC_DAIFMT_INV_MASK) {
760 case SND_SOC_DAIFMT_NB_NF:
761 SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
762 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
763 break;
764 case SND_SOC_DAIFMT_NB_IF:
765 SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
766 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
767 break;
768 case SND_SOC_DAIFMT_IB_NF:
769 SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
770 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
771 break;
772 case SND_SOC_DAIFMT_IB_IF:
773 SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
774 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
775 break;
776 }
777
778 switch (player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
779 case SND_SOC_DAIFMT_I2S:
780 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
781 SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(player);
782 break;
783 case SND_SOC_DAIFMT_LEFT_J:
784 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
785 SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
786 break;
787 case SND_SOC_DAIFMT_RIGHT_J:
788 SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(player);
789 SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
790 break;
791 default:
792 dev_err(player->dev, "format not supported");
793 return -EINVAL;
794 }
795
796 SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
797
798 /* Reset uniperipheral player */
799 SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
800
801 return reset_player(player);
802}
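
The trigger-limit calculation in uni_player_prepare() above is easier to follow with numbers. UNIPERIF_FIFO_FRAMES and UNIPERIF_FIFO_SIZE are constants defined in uniperif.h and not shown in this hunk; the values below are assumptions used only to illustrate the two branches.

	#include <stdio.h>

	/* Assumed values -- the real constants live in uniperif.h */
	#define UNIPERIF_FIFO_FRAMES	4
	#define UNIPERIF_FIFO_SIZE	70

	int main(void)
	{
		unsigned int channels = 2;
		unsigned int transfer_size = channels * UNIPERIF_FIFO_FRAMES;	/* 8 cells */

		/* Older IPs: DREQ asserted once this many cells are empty */
		unsigned int old_limit = UNIPERIF_FIFO_SIZE - transfer_size;	/* 62 */
		/* TOP_1_0 and later: the limit also gates the OFF/STANDBY exit */
		unsigned int new_limit = transfer_size;				/* 8 */

		printf("transfer=%u, pre-TOP_1_0 limit=%u, TOP_1_0 limit=%u\n",
		       transfer_size, old_limit, new_limit);
		return 0;
	}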
803
804static int uni_player_start(struct uniperif *player)
805{
806 int ret;
807
808 /* The player should be stopped */
809 if (player->state != UNIPERIF_STATE_STOPPED) {
810 dev_err(player->dev, "%s: invalid player state", __func__);
811 return -EINVAL;
812 }
813
814 ret = clk_prepare_enable(player->clk);
815 if (ret) {
816 dev_err(player->dev, "%s: Failed to enable clock", __func__);
817 return ret;
818 }
819
820 /* Clear any pending interrupts */
821 SET_UNIPERIF_ITS_BCLR(player, GET_UNIPERIF_ITS(player));
822
823 /* Set the interrupt mask */
824 SET_UNIPERIF_ITM_BSET_DMA_ERROR(player);
825 SET_UNIPERIF_ITM_BSET_FIFO_ERROR(player);
826
827 /* Enable underflow recovery interrupts */
828 if (player->info->underflow_enabled) {
829 SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(player);
830 SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
831 }
832
833 /* Reset uniperipheral player */
834 SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
835
836 ret = reset_player(player);
837 if (ret < 0)
838 return ret;
839
840 /*
841 * The driver does not use the IEC61937 features of the uniperipheral
842 * hardware. Instead, IEC61937 framing is done in software and inserted
843 * directly into the audio data stream. So even in encoded mode, linear
844 * PCM operation is still used; the only differences are the channel
845 * status bits indicating encoded mode and the validity bits being set.
846 */
847 SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(player);
848
849 /*
850 * If iec958 formatting is required for hdmi or spdif, then it must be
851 * enabled after the operation mode is set. If set prior to this, it
852 * will not take effect and will hang the player.
853 */
854 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
855 if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player))
856 SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
857
858 /* Force channel status update (no update occurs if the clock is disabled) */
859 if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
860 SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
861 else
862 SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
863
864 /* Update state to started */
865 player->state = UNIPERIF_STATE_STARTED;
866
867 return 0;
868}
869
870static int uni_player_stop(struct uniperif *player)
871{
872 int ret;
873
874 /* The player should not be in stopped state */
875 if (player->state == UNIPERIF_STATE_STOPPED) {
876 dev_err(player->dev, "%s: invalid player state", __func__);
877 return -EINVAL;
878 }
879
880 /* Turn the player off */
881 SET_UNIPERIF_CTRL_OPERATION_OFF(player);
882
883 /* Soft reset the player */
884 SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
885
886 ret = reset_player(player);
887 if (ret < 0)
888 return ret;
889
890 /* Disable interrupts */
891 SET_UNIPERIF_ITM_BCLR(player, GET_UNIPERIF_ITM(player));
892
893 /* Disable clock */
894 clk_disable_unprepare(player->clk);
895
896 /* Update state to stopped and return */
897 player->state = UNIPERIF_STATE_STOPPED;
898
899 return 0;
900}
901
902int uni_player_resume(struct uniperif *player)
903{
904 int ret;
905
906 /* Select the frequency synthesizer clock */
907 if (player->clk_sel) {
908 ret = regmap_field_write(player->clk_sel, 1);
909 if (ret) {
910 dev_err(player->dev,
911 "%s: Failed to select freq synth clock",
912 __func__);
913 return ret;
914 }
915 }
916
917 SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
918 SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
919 SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
920 SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
921
922 return 0;
923}
924EXPORT_SYMBOL_GPL(uni_player_resume);
925
926static int uni_player_trigger(struct snd_pcm_substream *substream,
927 int cmd, struct snd_soc_dai *dai)
928{
929 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
930 struct uniperif *player = priv->dai_data.uni;
931
932 switch (cmd) {
933 case SNDRV_PCM_TRIGGER_START:
934 return uni_player_start(player);
935 case SNDRV_PCM_TRIGGER_STOP:
936 return uni_player_stop(player);
937 case SNDRV_PCM_TRIGGER_RESUME:
938 return uni_player_resume(player);
939 default:
940 return -EINVAL;
941 }
942}
943
944static void uni_player_shutdown(struct snd_pcm_substream *substream,
945 struct snd_soc_dai *dai)
946{
947 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
948 struct uniperif *player = priv->dai_data.uni;
949
950 if (player->state != UNIPERIF_STATE_STOPPED)
951 /* Stop the player */
952 uni_player_stop(player);
953}
954
955static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
956 struct uniperif *player)
957{
958 int bit_offset;
959 struct device_node *node = pdev->dev.of_node;
960 struct regmap *regmap;
961
962 bit_offset = SYS_CFG_AUDI0_GLUE_PCM_CLKX + player->info->id;
963
964 regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
965
966 if (!IS_ERR(regmap)) {
967 struct reg_field regfield =
968 REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
969
970 player->clk_sel = regmap_field_alloc(regmap, regfield);
971 } else {
972 dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
973 return -EINVAL;
974 }
975
976 return 0;
977}
978
979static int uni_player_parse_dt(struct platform_device *pdev,
980 struct uniperif *player)
981{
982 struct uniperif_info *info;
983 struct device *dev = &pdev->dev;
984 struct device_node *pnode = pdev->dev.of_node;
985 const char *mode;
986
987 /* Allocate memory for the info structure */
988 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
989 if (!info)
990 return -ENOMEM;
991
992 of_property_read_u32(pnode, "version", &player->ver);
993 if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
994 dev_err(dev, "Unknown uniperipheral version ");
995 return -EINVAL;
996 }
997 /* Underflow recovery is only supported on later ip revisions */
998 if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
999 info->underflow_enabled = 1;
1000
1001 of_property_read_u32(pnode, "uniperiph-id", &info->id);
1002
1003 /* Read the device mode property */
1004 if (of_property_read_string(pnode, "mode", &mode))
 return -EINVAL;
1005
1006 if (strcasecmp(mode, "hdmi") == 0)
1007 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
1008 else if (strcasecmp(mode, "pcm") == 0)
1009 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM;
1010 else if (strcasecmp(mode, "spdif") == 0)
1011 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF;
1012 else
1013 info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE;
1014
1015 /* Save the info structure */
1016 player->info = info;
1017
1018 /* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */
1019 if (uni_player_parse_dt_clk_glue(pdev, player))
1020 return -EINVAL;
1021
1022 return 0;
1023}
1024
1025static const struct snd_soc_dai_ops uni_player_dai_ops = {
1026 .startup = uni_player_startup,
1027 .shutdown = uni_player_shutdown,
1028 .prepare = uni_player_prepare,
1029 .trigger = uni_player_trigger,
1030 .hw_params = sti_uniperiph_dai_hw_params,
1031 .set_fmt = sti_uniperiph_dai_set_fmt,
1032 .set_sysclk = uni_player_set_sysclk
1033};
1034
1035int uni_player_init(struct platform_device *pdev,
1036 struct uniperif *player)
1037{
1038 int ret = 0;
1039
1040 player->dev = &pdev->dev;
1041 player->state = UNIPERIF_STATE_STOPPED;
1042 player->hw = &uni_player_pcm_hw;
1043 player->dai_ops = &uni_player_dai_ops;
1044
1045 ret = uni_player_parse_dt(pdev, player);
1046
1047 if (ret < 0) {
1048 dev_err(player->dev, "Failed to parse DeviceTree");
1049 return ret;
1050 }
1051
1052 /* Get uniperif clock */
1053 player->clk = of_clk_get(pdev->dev.of_node, 0);
1054 if (IS_ERR(player->clk))
1055 return PTR_ERR(player->clk);
1056
1057 /* Select the frequency synthesizer clock */
1058 if (player->clk_sel) {
1059 ret = regmap_field_write(player->clk_sel, 1);
1060 if (ret) {
1061 dev_err(player->dev,
1062 "%s: Failed to select freq synth clock",
1063 __func__);
1064 return ret;
1065 }
1066 }
1067
1068 ret = devm_request_irq(&pdev->dev, player->irq,
1069 uni_player_irq_handler, IRQF_SHARED,
1070 dev_name(&pdev->dev), player);
1071 if (ret < 0)
1072 return ret;
1073
1074 mutex_init(&player->ctrl_lock);
1075
1076 /* Ensure these are disabled by default */
1077 SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
1078 SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
1079 SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
1080 SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
1081
1082 if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) {
1083 /* Set default iec958 status bits */
1084
1085 /* Consumer, PCM, copyright, 2ch, mode 0 */
1086 player->stream_settings.iec958.status[0] = 0x00;
1087 /* Broadcast reception category */
1088 player->stream_settings.iec958.status[1] =
1089 IEC958_AES1_CON_GENERAL;
1090 /* Do not take into account source or channel number */
1091 player->stream_settings.iec958.status[2] =
1092 IEC958_AES2_CON_SOURCE_UNSPEC;
1093 /* Sampling frequency not indicated */
1094 player->stream_settings.iec958.status[3] =
1095 IEC958_AES3_CON_FS_NOTID;
1096 /* Max sample word 24-bit, sample word length not indicated */
1097 player->stream_settings.iec958.status[4] =
1098 IEC958_AES4_CON_MAX_WORDLEN_24 |
1099 IEC958_AES4_CON_WORDLEN_24_20;
1100
1101 player->num_ctrls = ARRAY_SIZE(snd_sti_iec_ctl);
1102 player->snd_ctrls = snd_sti_iec_ctl[0];
1103 } else {
1104 player->num_ctrls = ARRAY_SIZE(snd_sti_pcm_ctl);
1105 player->snd_ctrls = snd_sti_pcm_ctl[0];
1106 }
1107
1108 return 0;
1109}
1110EXPORT_SYMBOL_GPL(uni_player_init);
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
new file mode 100644
index 000000000000..c502626f339b
--- /dev/null
+++ b/sound/soc/sti/uniperif_reader.c
@@ -0,0 +1,362 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2015
3 * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
4 * for STMicroelectronics.
5 * License terms: GNU General Public License (GPL), version 2
6 */
7
8#include <linux/clk.h>
9#include <linux/delay.h>
10#include <linux/io.h>
11
12#include <sound/soc.h>
13
14#include "uniperif.h"
15
16/*
17 * Note: snd_pcm_hardware is linked to DMA controller but is declared here to
18 * integrate unireader capability in terms of rate and supported channels
19 */
20static const struct snd_pcm_hardware uni_reader_pcm_hw = {
21 .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
22 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
23 SNDRV_PCM_INFO_MMAP_VALID,
24 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
25
26 .rates = SNDRV_PCM_RATE_CONTINUOUS,
27 .rate_min = 8000,
28 .rate_max = 96000,
29
30 .channels_min = 2,
31 .channels_max = 8,
32
33 .periods_min = 2,
34 .periods_max = 48,
35
36 .period_bytes_min = 128,
37 .period_bytes_max = 64 * PAGE_SIZE,
38 .buffer_bytes_max = 256 * PAGE_SIZE
39};
40
41/*
42 * uni_reader_irq_handler
43 * In case of error audio stream is stopped; stop action is protected via PCM
44 * stream lock to avoid race condition with trigger callback.
45 */
46static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
47{
48 irqreturn_t ret = IRQ_NONE;
49 struct uniperif *reader = dev_id;
50 unsigned int status;
51
52 if (reader->state == UNIPERIF_STATE_STOPPED) {
53 /* Unexpected IRQ: do nothing */
54 dev_warn(reader->dev, "unexpected IRQ ");
55 return IRQ_HANDLED;
56 }
57
58 /* Get interrupt status & clear them immediately */
59 status = GET_UNIPERIF_ITS(reader);
60 SET_UNIPERIF_ITS_BCLR(reader, status);
61
62 /* Check for fifo overflow error */
63 if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
64 dev_err(reader->dev, "FIFO error detected");
65
66 snd_pcm_stream_lock(reader->substream);
67 snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
68 snd_pcm_stream_unlock(reader->substream);
69
70 return IRQ_HANDLED;
71 }
72
73 return ret;
74}
75
76static int uni_reader_prepare(struct snd_pcm_substream *substream,
77 struct snd_soc_dai *dai)
78{
79 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
80 struct uniperif *reader = priv->dai_data.uni;
81 struct snd_pcm_runtime *runtime = substream->runtime;
82 int transfer_size, trigger_limit;
83 int slot_width;
84 int count = 10;
85
86 /* The reader should be stopped */
87 if (reader->state != UNIPERIF_STATE_STOPPED) {
88 dev_err(reader->dev, "%s: invalid reader state %d", __func__,
89 reader->state);
90 return -EINVAL;
91 }
92
93 /* Calculate transfer size (in fifo cells and bytes) for frame count */
94 transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
95
96 /* Calculate number of empty cells available before asserting DREQ */
97 if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
98 trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
99 else
100 /*
101 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
102 * FDMA_TRIGGER_LIMIT also controls when the state switches
103 * from OFF or STANDBY to AUDIO DATA.
104 */
105 trigger_limit = transfer_size;
106
107 /* Trigger limit must be an even number */
108 if ((trigger_limit % 2) ||
109 (trigger_limit != 1 && transfer_size % 2) ||
110 (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
111 dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
112 return -EINVAL;
113 }
114
115 SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
116
117 switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
118 case SND_SOC_DAIFMT_IB_IF:
119 case SND_SOC_DAIFMT_NB_IF:
120 SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
121 break;
122 default:
123 SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
124 }
125
126 /* Force slot width to 32 in I2S mode */
127 if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
128 == SND_SOC_DAIFMT_I2S) {
129 slot_width = 32;
130 } else {
131 switch (runtime->format) {
132 case SNDRV_PCM_FORMAT_S16_LE:
133 slot_width = 16;
134 break;
135 default:
136 slot_width = 32;
137 break;
138 }
139 }
140
141 /* Number of bits per subframe (i.e one channel sample) on input. */
142 switch (slot_width) {
143 case 32:
144 SET_UNIPERIF_I2S_FMT_NBIT_32(reader);
145 SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
146 break;
147 case 16:
148 SET_UNIPERIF_I2S_FMT_NBIT_16(reader);
149 SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
150 break;
151 default:
152 dev_err(reader->dev, "subframe format not supported");
153 return -EINVAL;
154 }
155
156 /* Configure data memory format */
157 switch (runtime->format) {
158 case SNDRV_PCM_FORMAT_S16_LE:
159 /* One data word contains two samples */
160 SET_UNIPERIF_CONFIG_MEM_FMT_16_16(reader);
161 break;
162
163 case SNDRV_PCM_FORMAT_S32_LE:
164 /*
165 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
166 * on the MSB then zeros (if less than 32 bits)"...
167 */
168 SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
169 break;
170
171 default:
172 dev_err(reader->dev, "format not supported");
173 return -EINVAL;
174 }
175
176 switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
177 case SND_SOC_DAIFMT_I2S:
178 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
179 SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(reader);
180 break;
181 case SND_SOC_DAIFMT_LEFT_J:
182 SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
183 SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
184 break;
185 case SND_SOC_DAIFMT_RIGHT_J:
186 SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(reader);
187 SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
188 break;
189 default:
190 dev_err(reader->dev, "format not supported");
191 return -EINVAL;
192 }
193
194 SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
195
196 /* Data clocking (changing) on the rising edge */
197 SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
198
199 /* Number of channels must be even */
201 if ((runtime->channels % 2) || (runtime->channels < 2) ||
202 (runtime->channels > 10)) {
203 dev_err(reader->dev, "%s: invalid nb of channels", __func__);
204 return -EINVAL;
205 }
206
207 SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
208
209 /* Clear any pending interrupts */
210 SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
211
212 SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(reader, 0);
213
214 /* Set the interrupt mask */
215 SET_UNIPERIF_ITM_BSET_DMA_ERROR(reader);
216 SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
217 SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(reader);
218
219 /* Enable underflow recovery interrupts */
220 if (reader->info->underflow_enabled) {
221 SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(reader);
222 SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(reader);
223 }
224
225 /* Reset uniperipheral reader */
226 SET_UNIPERIF_SOFT_RST_SOFT_RST(reader);
227
228 while (GET_UNIPERIF_SOFT_RST_SOFT_RST(reader) && count) {
229 udelay(5);
230 count--;
231 }
232 if (!count) {
233 dev_err(reader->dev, "Failed to reset uniperif");
234 return -EIO;
235 }
236
237 return 0;
238}
239
240static int uni_reader_start(struct uniperif *reader)
241{
242 /* The reader should be stopped */
243 if (reader->state != UNIPERIF_STATE_STOPPED) {
244 dev_err(reader->dev, "%s: invalid reader state", __func__);
245 return -EINVAL;
246 }
247
248 /* Enable reader interrupts (and clear possible stalled ones) */
249 SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(reader);
250 SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
251
252 /* Launch the reader */
253 SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(reader);
254
255 /* Update state to started */
256 reader->state = UNIPERIF_STATE_STARTED;
257 return 0;
258}
259
260static int uni_reader_stop(struct uniperif *reader)
261{
262 /* The reader should not be in stopped state */
263 if (reader->state == UNIPERIF_STATE_STOPPED) {
264 dev_err(reader->dev, "%s: invalid reader state", __func__);
265 return -EINVAL;
266 }
267
268 /* Turn the reader off */
269 SET_UNIPERIF_CTRL_OPERATION_OFF(reader);
270
271 /* Disable interrupts */
272 SET_UNIPERIF_ITM_BCLR(reader, GET_UNIPERIF_ITM(reader));
273
274 /* Update state to stopped and return */
275 reader->state = UNIPERIF_STATE_STOPPED;
276
277 return 0;
278}
279
280static int uni_reader_trigger(struct snd_pcm_substream *substream,
281 int cmd, struct snd_soc_dai *dai)
282{
283 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
284 struct uniperif *reader = priv->dai_data.uni;
285
286 switch (cmd) {
287 case SNDRV_PCM_TRIGGER_START:
288 return uni_reader_start(reader);
289 case SNDRV_PCM_TRIGGER_STOP:
290 return uni_reader_stop(reader);
291 default:
292 return -EINVAL;
293 }
294}
295
296static void uni_reader_shutdown(struct snd_pcm_substream *substream,
297 struct snd_soc_dai *dai)
298{
299 struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
300 struct uniperif *reader = priv->dai_data.uni;
301
302 if (reader->state != UNIPERIF_STATE_STOPPED) {
303 /* Stop the reader */
304 uni_reader_stop(reader);
305 }
306}
307
308static int uni_reader_parse_dt(struct platform_device *pdev,
309 struct uniperif *reader)
310{
311 struct uniperif_info *info;
312 struct device_node *node = pdev->dev.of_node;
313
314 /* Allocate memory for the info structure */
315 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
316 if (!info)
317 return -ENOMEM;
318
319 of_property_read_u32(node, "version", &reader->ver);
320
321 /* Save the info structure */
322 reader->info = info;
323
324 return 0;
325}
326
327static const struct snd_soc_dai_ops uni_reader_dai_ops = {
328 .shutdown = uni_reader_shutdown,
329 .prepare = uni_reader_prepare,
330 .trigger = uni_reader_trigger,
331 .hw_params = sti_uniperiph_dai_hw_params,
332 .set_fmt = sti_uniperiph_dai_set_fmt,
333};
334
335int uni_reader_init(struct platform_device *pdev,
336 struct uniperif *reader)
337{
338 int ret = 0;
339
340 reader->dev = &pdev->dev;
341 reader->state = UNIPERIF_STATE_STOPPED;
342 reader->hw = &uni_reader_pcm_hw;
343 reader->dai_ops = &uni_reader_dai_ops;
344
345 dev_dbg(reader->dev, "%s: enter\n", __func__);
346 ret = uni_reader_parse_dt(pdev, reader);
347 if (ret < 0) {
348 dev_err(reader->dev, "Failed to parse DeviceTree");
349 return ret;
350 }
351
352 ret = devm_request_irq(&pdev->dev, reader->irq,
353 uni_reader_irq_handler, IRQF_SHARED,
354 dev_name(&pdev->dev), reader);
355 if (ret < 0) {
356 dev_err(&pdev->dev, "Failed to request IRQ");
357 return -EBUSY;
358 }
359
360 return 0;
361}
362EXPORT_SYMBOL_GPL(uni_reader_init);
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 98d96e1b17e0..1930c42e1f55 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394 zx_i2s->mapbase = res->start; 394 zx_i2s->mapbase = res->start;
395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res); 395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
396 if (!zx_i2s->reg_base) { 396 if (IS_ERR(zx_i2s->reg_base)) {
397 dev_err(&pdev->dev, "ioremap failed!\n"); 397 dev_err(&pdev->dev, "ioremap failed!\n");
398 return -EIO; 398 return PTR_ERR(zx_i2s->reg_base);
399 } 399 }
400 400
401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL); 401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
index 11a0e46a1156..26265ce4caca 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx296702-spdif.c
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
323 zx_spdif->mapbase = res->start; 323 zx_spdif->mapbase = res->start;
324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res); 324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
325 if (!zx_spdif->reg_base) { 325 if (IS_ERR(zx_spdif->reg_base)) {
326 dev_err(&pdev->dev, "ioremap failed!\n"); 326 dev_err(&pdev->dev, "ioremap failed!\n");
327 return -EIO; 327 return PTR_ERR(zx_spdif->reg_base);
328 } 328 }
329 329
330 zx_spdif_dev_init(zx_spdif->reg_base); 330 zx_spdif_dev_init(zx_spdif->reg_base);
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 1b1a89e80d13..784ceb85b2d9 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
956 if (!amd->regs) { 956 if (!amd->regs) {
957 snd_printk(KERN_ERR 957 snd_printk(KERN_ERR
958 "amd7930-%d: Unable to map chip registers.\n", dev); 958 "amd7930-%d: Unable to map chip registers.\n", dev);
959 kfree(amd);
959 return -EIO; 960 return -EIO;
960 } 961 }
961 962
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1fab9778807a..0450593980fd 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
638 int err = -ENODEV; 638 int err = -ENODEV;
639 639
640 down_read(&chip->shutdown_rwsem); 640 down_read(&chip->shutdown_rwsem);
641 if (chip->probing && chip->in_pm) 641 if (chip->probing || chip->in_pm)
642 err = 0; 642 err = 0;
643 else if (!chip->shutdown) 643 else if (!chip->shutdown)
644 err = usb_autopm_get_interface(chip->pm_intf); 644 err = usb_autopm_get_interface(chip->pm_intf);
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 8461d6bf992f..204cc074adb9 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
186 int ret = 0; 186 int ret = 0;
187 187
188 spin_lock_irqsave(&pstr->lock, flags); 188 spin_lock_irqsave(&pstr->lock, flags);
189 if (!test_and_set_bit(type, &pstr->running)) { 189 if (!test_and_set_bit(type, &pstr->running) &&
190 if (pstr->active_urbs || pstr->unlink_urbs) { 190 !(pstr->active_urbs || pstr->unlink_urbs)) {
191 ret = -EBUSY;
192 goto error;
193 }
194
195 pstr->count = 0; 191 pstr->count = 0;
196 /* Submit all currently available URBs */ 192 /* Submit all currently available URBs */
197 if (direction == SNDRV_PCM_STREAM_PLAYBACK) 193 if (direction == SNDRV_PCM_STREAM_PLAYBACK)
@@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
199 else 195 else
200 ret = line6_submit_audio_in_all_urbs(line6pcm); 196 ret = line6_submit_audio_in_all_urbs(line6pcm);
201 } 197 }
202 error:
203 if (ret < 0) 198 if (ret < 0)
204 clear_bit(type, &pstr->running); 199 clear_bit(type, &pstr->running);
205 spin_unlock_irqrestore(&pstr->lock, flags); 200 spin_unlock_irqrestore(&pstr->lock, flags);
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e5000da9e9d7..6a803eff87f7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
341 { 0 } 341 { 0 }
342}; 342};
343 343
344/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
345static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
346static struct usbmix_name_map bose_companion5_map[] = {
347 { 3, NULL, .dB = &bose_companion5_dB },
348 { 0 } /* terminator */
349};
350
351/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
352static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
353static struct usbmix_name_map dragonfly_1_2_map[] = {
354 { 7, NULL, .dB = &dragonfly_1_2_dB },
355 { 0 } /* terminator */
356};
357
344/* 358/*
345 * Control map entries 359 * Control map entries
346 */ 360 */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
451 .id = USB_ID(0x25c4, 0x0003), 465 .id = USB_ID(0x25c4, 0x0003),
452 .map = scms_usb3318_map, 466 .map = scms_usb3318_map,
453 }, 467 },
468 {
469 /* Bose Companion 5 */
470 .id = USB_ID(0x05a7, 0x1020),
471 .map = bose_companion5_map,
472 },
473 {
474 /* Dragonfly DAC 1.2 */
475 .id = USB_ID(0x21b4, 0x0081),
476 .map = dragonfly_1_2_map,
477 },
454 { 0 } /* terminator */ 478 { 0 } /* terminator */
455}; 479};
456 480
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 2f6d3e9a1bcd..e4756651a52c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
2512 } 2512 }
2513}, 2513},
2514 2514
2515/* Steinberg devices */
2516{
2517 /* Steinberg MI2 */
2518 USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
2519 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
2520 .ifnum = QUIRK_ANY_INTERFACE,
2521 .type = QUIRK_COMPOSITE,
2522 .data = & (const struct snd_usb_audio_quirk[]) {
2523 {
2524 .ifnum = 0,
2525 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2526 },
2527 {
2528 .ifnum = 1,
2529 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2530 },
2531 {
2532 .ifnum = 2,
2533 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2534 },
2535 {
2536 .ifnum = 3,
2537 .type = QUIRK_MIDI_FIXED_ENDPOINT,
2538 .data = &(const struct snd_usb_midi_endpoint_info) {
2539 .out_cables = 0x0001,
2540 .in_cables = 0x0001
2541 }
2542 },
2543 {
2544 .ifnum = -1
2545 }
2546 }
2547 }
2548},
2549{
2550 /* Steinberg MI4 */
2551 USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
2552 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
2553 .ifnum = QUIRK_ANY_INTERFACE,
2554 .type = QUIRK_COMPOSITE,
2555 .data = & (const struct snd_usb_audio_quirk[]) {
2556 {
2557 .ifnum = 0,
2558 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2559 },
2560 {
2561 .ifnum = 1,
2562 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2563 },
2564 {
2565 .ifnum = 2,
2566 .type = QUIRK_AUDIO_STANDARD_INTERFACE
2567 },
2568 {
2569 .ifnum = 3,
2570 .type = QUIRK_MIDI_FIXED_ENDPOINT,
2571 .data = &(const struct snd_usb_midi_endpoint_info) {
2572 .out_cables = 0x0001,
2573 .in_cables = 0x0001
2574 }
2575 },
2576 {
2577 .ifnum = -1
2578 }
2579 }
2580 }
2581},
2582
2515/* TerraTec devices */ 2583/* TerraTec devices */
2516{ 2584{
2517 USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012), 2585 USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index f0e72674c52d..9098083869c8 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -41,4 +41,62 @@
41 41
42#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 42#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
43 43
44#include <linux/types.h>
45
46static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
47{
48 switch (size) {
49 case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
50 case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
51 case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
52 case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
53 default:
54 barrier();
55 __builtin_memcpy((void *)res, (const void *)p, size);
56 barrier();
57 }
58}
59
60static __always_inline void __write_once_size(volatile void *p, void *res, int size)
61{
62 switch (size) {
63 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
64 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
65 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
66 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
67 default:
68 barrier();
69 __builtin_memcpy((void *)p, (const void *)res, size);
70 barrier();
71 }
72}
73
74/*
75 * Prevent the compiler from merging or refetching reads or writes. The
76 * compiler is also forbidden from reordering successive instances of
77 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
78 * compiler is aware of some particular ordering. One way to make the
79 * compiler aware of ordering is to put the two invocations of READ_ONCE,
80 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
81 *
82 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
83 * data types like structs or unions. If the size of the accessed data
84 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
85 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
86 * compile-time warning.
87 *
88 * Their two major use cases are: (1) Mediating communication between
89 * process-level code and irq/NMI handlers, all running on the same CPU,
90 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
91 * mutilate accesses that either do not require ordering or that interact
92 * with an explicit memory barrier or atomic instruction that provides the
93 * required ordering.
94 */
95
96#define READ_ONCE(x) \
97 ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
98
99#define WRITE_ONCE(x, val) \
100 ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
101
44#endif /* _TOOLS_LINUX_COMPILER_H */ 102#endif /* _TOOLS_LINUX_COMPILER_H */
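
A short usage sketch of the two macros added above (not part of this patch), in the spirit of use case (1) from the comment: a flag shared between a main loop and an interrupt-style handler. The names are illustrative.

	#include <linux/compiler.h>

	static int need_flush;

	/* Producer, e.g. an irq-style handler running on the same CPU */
	static void on_event(void)
	{
		WRITE_ONCE(need_flush, 1);
	}

	/* Consumer: READ_ONCE forces a fresh load on every iteration, so the
	 * compiler cannot hoist the test out of the loop or fold the reads. */
	static void wait_for_event(void)
	{
		while (!READ_ONCE(need_flush))
			; /* spin */
		WRITE_ONCE(need_flush, 0);
	}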
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
deleted file mode 100644
index d07e586b9ba0..000000000000
--- a/tools/include/linux/export.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef _TOOLS_LINUX_EXPORT_H_
2#define _TOOLS_LINUX_EXPORT_H_
3
4#define EXPORT_SYMBOL(sym)
5#define EXPORT_SYMBOL_GPL(sym)
6#define EXPORT_SYMBOL_GPL_FUTURE(sym)
7#define EXPORT_UNUSED_SYMBOL(sym)
8#define EXPORT_UNUSED_SYMBOL_GPL(sym)
9
10#endif
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
new file mode 100644
index 000000000000..112582253dd0
--- /dev/null
+++ b/tools/include/linux/rbtree.h
@@ -0,0 +1,104 @@
1/*
2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18
19 linux/include/linux/rbtree.h
20
21 To use rbtrees you'll have to implement your own insert and search cores.
22 This avoids the use of callbacks and the dramatic performance cost they
23 would incur. It's not the cleanest way, but it's how you get both
24 performance and genericity in C (as opposed to C++)...
25
26 See Documentation/rbtree.txt for documentation and samples.
27*/
28
29#ifndef __TOOLS_LINUX_PERF_RBTREE_H
30#define __TOOLS_LINUX_PERF_RBTREE_H
31
32#include <linux/kernel.h>
33#include <linux/stddef.h>
34
35struct rb_node {
36 unsigned long __rb_parent_color;
37 struct rb_node *rb_right;
38 struct rb_node *rb_left;
39} __attribute__((aligned(sizeof(long))));
40 /* The alignment might seem pointless, but allegedly CRIS needs it */
41
42struct rb_root {
43 struct rb_node *rb_node;
44};
45
46
47#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
48
49#define RB_ROOT (struct rb_root) { NULL, }
50#define rb_entry(ptr, type, member) container_of(ptr, type, member)
51
52#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
53
54/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
55#define RB_EMPTY_NODE(node) \
56 ((node)->__rb_parent_color == (unsigned long)(node))
57#define RB_CLEAR_NODE(node) \
58 ((node)->__rb_parent_color = (unsigned long)(node))
59
60
61extern void rb_insert_color(struct rb_node *, struct rb_root *);
62extern void rb_erase(struct rb_node *, struct rb_root *);
63
64
65/* Find logical next and previous nodes in a tree */
66extern struct rb_node *rb_next(const struct rb_node *);
67extern struct rb_node *rb_prev(const struct rb_node *);
68extern struct rb_node *rb_first(const struct rb_root *);
69extern struct rb_node *rb_last(const struct rb_root *);
70
71/* Postorder iteration - always visit the parent after its children */
72extern struct rb_node *rb_first_postorder(const struct rb_root *);
73extern struct rb_node *rb_next_postorder(const struct rb_node *);
74
75/* Fast replacement of a single node without remove/rebalance/add/rebalance */
76extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
77 struct rb_root *root);
78
79static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
80 struct rb_node **rb_link)
81{
82 node->__rb_parent_color = (unsigned long)parent;
83 node->rb_left = node->rb_right = NULL;
84
85 *rb_link = node;
86}
87
88#define rb_entry_safe(ptr, type, member) \
89 ({ typeof(ptr) ____ptr = (ptr); \
90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
91 })
92
93
94/*
95 * Handy for checking that we are not deleting an entry that is
96 * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
97 * probably should be moved to lib/rbtree.c...
98 */
99static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
100{
101 rb_erase(n, root);
102 RB_CLEAR_NODE(n);
103}
104#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
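
As the header comment notes, rbtree users supply their own insert and search cores. A minimal insert keyed on a plain integer, using only the API declared above (the struct and function names are illustrative, not part of this patch):

	#include <linux/rbtree.h>

	struct my_node {
		struct rb_node rb;
		long key;
	};

	/* Returns 0 on success, -1 if a node with the same key already exists */
	static int my_insert(struct rb_root *root, struct my_node *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		while (*link) {
			struct my_node *cur = rb_entry(*link, struct my_node, rb);

			parent = *link;
			if (new->key < cur->key)
				link = &(*link)->rb_left;
			else if (new->key > cur->key)
				link = &(*link)->rb_right;
			else
				return -1;
		}

		rb_link_node(&new->rb, parent, link);	/* link the new node... */
		rb_insert_color(&new->rb, root);	/* ...then rebalance/recolour */
		return 0;
	}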
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
new file mode 100644
index 000000000000..43be941db695
--- /dev/null
+++ b/tools/include/linux/rbtree_augmented.h
@@ -0,0 +1,245 @@
1/*
2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4 (C) 2002 David Woodhouse <dwmw2@infradead.org>
5 (C) 2012 Michel Lespinasse <walken@google.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
21 tools/linux/include/linux/rbtree_augmented.h
22
23 Copied from:
24 linux/include/linux/rbtree_augmented.h
25*/
26
27#ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H
28#define _TOOLS_LINUX_RBTREE_AUGMENTED_H
29
30#include <linux/compiler.h>
31#include <linux/rbtree.h>
32
33/*
34 * Please note - only struct rb_augment_callbacks and the prototypes for
35 * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
36 * The rest are implementation details you are not expected to depend on.
37 *
38 * See Documentation/rbtree.txt for documentation and samples.
39 */
40
41struct rb_augment_callbacks {
42 void (*propagate)(struct rb_node *node, struct rb_node *stop);
43 void (*copy)(struct rb_node *old, struct rb_node *new);
44 void (*rotate)(struct rb_node *old, struct rb_node *new);
45};
46
47extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
48 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
49/*
50 * Fixup the rbtree and update the augmented information when rebalancing.
51 *
52 * On insertion, the user must update the augmented information on the path
53 * leading to the inserted node, then call rb_link_node() as usual and
54 * rb_augment_inserted() instead of the usual rb_insert_color() call.
55 * If rb_augment_inserted() rebalances the rbtree, it will callback into
56 * a user provided function to update the augmented information on the
57 * affected subtrees.
58 */
59static inline void
60rb_insert_augmented(struct rb_node *node, struct rb_root *root,
61 const struct rb_augment_callbacks *augment)
62{
63 __rb_insert_augmented(node, root, augment->rotate);
64}
65
66#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
67 rbtype, rbaugmented, rbcompute) \
68static inline void \
69rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
70{ \
71 while (rb != stop) { \
72 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
73 rbtype augmented = rbcompute(node); \
74 if (node->rbaugmented == augmented) \
75 break; \
76 node->rbaugmented = augmented; \
77 rb = rb_parent(&node->rbfield); \
78 } \
79} \
80static inline void \
81rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
82{ \
83 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
84 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
85 new->rbaugmented = old->rbaugmented; \
86} \
87static void \
88rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
89{ \
90 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
91 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
92 new->rbaugmented = old->rbaugmented; \
93 old->rbaugmented = rbcompute(old); \
94} \
95rbstatic const struct rb_augment_callbacks rbname = { \
96 rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
97};
98
99
100#define RB_RED 0
101#define RB_BLACK 1
102
103#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
104
105#define __rb_color(pc) ((pc) & 1)
106#define __rb_is_black(pc) __rb_color(pc)
107#define __rb_is_red(pc) (!__rb_color(pc))
108#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
109#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
110#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
111
112static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
113{
114 rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
115}
116
117static inline void rb_set_parent_color(struct rb_node *rb,
118 struct rb_node *p, int color)
119{
120 rb->__rb_parent_color = (unsigned long)p | color;
121}
122
123static inline void
124__rb_change_child(struct rb_node *old, struct rb_node *new,
125 struct rb_node *parent, struct rb_root *root)
126{
127 if (parent) {
128 if (parent->rb_left == old)
129 parent->rb_left = new;
130 else
131 parent->rb_right = new;
132 } else
133 root->rb_node = new;
134}
135
136extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
137 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
138
139static __always_inline struct rb_node *
140__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
141 const struct rb_augment_callbacks *augment)
142{
143 struct rb_node *child = node->rb_right, *tmp = node->rb_left;
144 struct rb_node *parent, *rebalance;
145 unsigned long pc;
146
147 if (!tmp) {
148 /*
149 * Case 1: node to erase has no more than 1 child (easy!)
150 *
151 * Note that if there is one child it must be red due to 5)
152 * and node must be black due to 4). We adjust colors locally
153 * so as to bypass __rb_erase_color() later on.
154 */
155 pc = node->__rb_parent_color;
156 parent = __rb_parent(pc);
157 __rb_change_child(node, child, parent, root);
158 if (child) {
159 child->__rb_parent_color = pc;
160 rebalance = NULL;
161 } else
162 rebalance = __rb_is_black(pc) ? parent : NULL;
163 tmp = parent;
164 } else if (!child) {
165 /* Still case 1, but this time the child is node->rb_left */
166 tmp->__rb_parent_color = pc = node->__rb_parent_color;
167 parent = __rb_parent(pc);
168 __rb_change_child(node, tmp, parent, root);
169 rebalance = NULL;
170 tmp = parent;
171 } else {
172 struct rb_node *successor = child, *child2;
173 tmp = child->rb_left;
174 if (!tmp) {
175 /*
176 * Case 2: node's successor is its right child
177 *
178 *    (n)          (s)
179 *    / \          / \
180 *  (x) (s)  ->  (x) (c)
181 *        \
182 *        (c)
183 */
184 parent = successor;
185 child2 = successor->rb_right;
186 augment->copy(node, successor);
187 } else {
188 /*
189 * Case 3: node's successor is leftmost under
190 * node's right child subtree
191 *
192 *    (n)          (s)
193 *    / \          / \
194 *  (x) (y)  ->  (x) (y)
195 *      /            /
196 *    (p)          (p)
197 *    /            /
198 *  (s)          (c)
199 *    \
200 *    (c)
201 */
202 do {
203 parent = successor;
204 successor = tmp;
205 tmp = tmp->rb_left;
206 } while (tmp);
207 parent->rb_left = child2 = successor->rb_right;
208 successor->rb_right = child;
209 rb_set_parent(child, successor);
210 augment->copy(node, successor);
211 augment->propagate(parent, successor);
212 }
213
214 successor->rb_left = tmp = node->rb_left;
215 rb_set_parent(tmp, successor);
216
217 pc = node->__rb_parent_color;
218 tmp = __rb_parent(pc);
219 __rb_change_child(node, successor, tmp, root);
220 if (child2) {
221 successor->__rb_parent_color = pc;
222 rb_set_parent_color(child2, parent, RB_BLACK);
223 rebalance = NULL;
224 } else {
225 unsigned long pc2 = successor->__rb_parent_color;
226 successor->__rb_parent_color = pc;
227 rebalance = __rb_is_black(pc2) ? parent : NULL;
228 }
229 tmp = successor;
230 }
231
232 augment->propagate(tmp, NULL);
233 return rebalance;
234}
235
236static __always_inline void
237rb_erase_augmented(struct rb_node *node, struct rb_root *root,
238 const struct rb_augment_callbacks *augment)
239{
240 struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
241 if (rebalance)
242 __rb_erase_color(rebalance, root, augment->rotate);
243}
244
245#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
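
For reference, a minimal sketch of how the augmented API above is typically wired up, assuming a hypothetical node type that caches the maximum value found in its subtree. The struct, field, and helper names are illustrative only and not part of this patch; the sketch relies only on rb_entry(), RB_DECLARE_CALLBACKS(), rb_insert_augmented() and rb_erase_augmented() as declared in these headers.

struct mynode {
	struct rb_node rb;
	unsigned long value;	/* key stored in this node */
	unsigned long max;	/* rbaugmented: max value in this subtree */
};

static unsigned long mynode_compute_max(struct mynode *node)
{
	unsigned long max = node->value;
	struct mynode *child;

	if (node->rb.rb_left) {
		child = rb_entry(node->rb.rb_left, struct mynode, rb);
		if (child->max > max)
			max = child->max;
	}
	if (node->rb.rb_right) {
		child = rb_entry(node->rb.rb_right, struct mynode, rb);
		if (child->max > max)
			max = child->max;
	}
	return max;
}

/* Generates mynode_aug_propagate/_copy/_rotate plus the callbacks struct */
RB_DECLARE_CALLBACKS(static, mynode_aug, struct mynode, rb,
		     unsigned long, max, mynode_compute_max)

/*
 * Once a new node has been linked with rb_link_node(), insertion and
 * removal go through the augmented entry points so the cached maxima
 * stay correct across rotations:
 *
 *	rb_insert_augmented(&node->rb, root, &mynode_aug);
 *	rb_erase_augmented(&node->rb, root, &mynode_aug);
 */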
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index 8bd960658463..fe1b02c2c95b 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -36,7 +36,7 @@ $(LIBFILE): $(API_IN)
36 36
37clean: 37clean:
38 $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \ 38 $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
39 find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM) 39 find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
40 40
41FORCE: 41FORCE:
42 42
diff --git a/tools/lib/hweight.c b/tools/lib/hweight.c
new file mode 100644
index 000000000000..0b859b884339
--- /dev/null
+++ b/tools/lib/hweight.c
@@ -0,0 +1,62 @@
1#include <linux/bitops.h>
2#include <asm/types.h>
3
4/**
5 * hweightN - returns the hamming weight of an N-bit word
6 * @x: the word to weigh
7 *
8 * The Hamming Weight of a number is the total number of bits set in it.
9 */
10
11unsigned int __sw_hweight32(unsigned int w)
12{
13#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
14 w -= (w >> 1) & 0x55555555;
15 w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
16 w = (w + (w >> 4)) & 0x0f0f0f0f;
17 return (w * 0x01010101) >> 24;
18#else
19 unsigned int res = w - ((w >> 1) & 0x55555555);
20 res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
21 res = (res + (res >> 4)) & 0x0F0F0F0F;
22 res = res + (res >> 8);
23 return (res + (res >> 16)) & 0x000000FF;
24#endif
25}
26
27unsigned int __sw_hweight16(unsigned int w)
28{
29 unsigned int res = w - ((w >> 1) & 0x5555);
30 res = (res & 0x3333) + ((res >> 2) & 0x3333);
31 res = (res + (res >> 4)) & 0x0F0F;
32 return (res + (res >> 8)) & 0x00FF;
33}
34
35unsigned int __sw_hweight8(unsigned int w)
36{
37 unsigned int res = w - ((w >> 1) & 0x55);
38 res = (res & 0x33) + ((res >> 2) & 0x33);
39 return (res + (res >> 4)) & 0x0F;
40}
41
42unsigned long __sw_hweight64(__u64 w)
43{
44#if BITS_PER_LONG == 32
45 return __sw_hweight32((unsigned int)(w >> 32)) +
46 __sw_hweight32((unsigned int)w);
47#elif BITS_PER_LONG == 64
48#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
49 w -= (w >> 1) & 0x5555555555555555ul;
50 w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
51 w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
52 return (w * 0x0101010101010101ul) >> 56;
53#else
54 __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
55 res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
56 res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
57 res = res + (res >> 8);
58 res = res + (res >> 16);
59 return (res + (res >> 32)) & 0x00000000000000FFul;
60#endif
61#endif
62}
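
For a quick sense of what these helpers compute, here is a hypothetical self-check (not part of the patch; it only assumes the functions above are linked in). Each call simply counts the bits set in its argument:

#include <assert.h>

static void hweight_selftest(void)
{
	assert(__sw_hweight8(0xA5) == 4);	/* 1010 0101 -> four bits set */
	assert(__sw_hweight16(0xF0F0) == 8);
	assert(__sw_hweight32(0xFFFF0000) == 16);
	assert(__sw_hweight64(0x8000000000000001ULL) == 2);
}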
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
new file mode 100644
index 000000000000..17c2b596f043
--- /dev/null
+++ b/tools/lib/rbtree.c
@@ -0,0 +1,548 @@
1/*
2 Red Black Trees
3 (C) 1999 Andrea Arcangeli <andrea@suse.de>
4 (C) 2002 David Woodhouse <dwmw2@infradead.org>
5 (C) 2012 Michel Lespinasse <walken@google.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
21 linux/lib/rbtree.c
22*/
23
24#include <linux/rbtree_augmented.h>
25
26/*
27 * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
28 *
29 * 1) A node is either red or black
30 * 2) The root is black
31 * 3) All leaves (NULL) are black
32 * 4) Both children of every red node are black
33 * 5) Every simple path from root to leaves contains the same number
34 * of black nodes.
35 *
36 * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
37 * consecutive red nodes in a path and every red node is therefore followed by
38 * a black. So if B is the number of black nodes on every simple path (as per
39 * 5), then the longest possible path due to 4 is 2B.
40 *
41 * We shall indicate color with case, where black nodes are uppercase and red
42 * nodes will be lowercase. Unknown color nodes shall be drawn as red within
43 * parentheses and have some accompanying text comment.
44 */
45
46static inline void rb_set_black(struct rb_node *rb)
47{
48 rb->__rb_parent_color |= RB_BLACK;
49}
50
51static inline struct rb_node *rb_red_parent(struct rb_node *red)
52{
53 return (struct rb_node *)red->__rb_parent_color;
54}
55
56/*
57 * Helper function for rotations:
58 * - old's parent and color get assigned to new
59 * - old gets assigned new as a parent and 'color' as a color.
60 */
61static inline void
62__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
63 struct rb_root *root, int color)
64{
65 struct rb_node *parent = rb_parent(old);
66 new->__rb_parent_color = old->__rb_parent_color;
67 rb_set_parent_color(old, new, color);
68 __rb_change_child(old, new, parent, root);
69}
70
71static __always_inline void
72__rb_insert(struct rb_node *node, struct rb_root *root,
73 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
74{
75 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
76
77 while (true) {
78 /*
79 * Loop invariant: node is red
80 *
81 * If there is a black parent, we are done.
82 * Otherwise, take some corrective action as we don't
83 * want a red root or two consecutive red nodes.
84 */
85 if (!parent) {
86 rb_set_parent_color(node, NULL, RB_BLACK);
87 break;
88 } else if (rb_is_black(parent))
89 break;
90
91 gparent = rb_red_parent(parent);
92
93 tmp = gparent->rb_right;
94 if (parent != tmp) { /* parent == gparent->rb_left */
95 if (tmp && rb_is_red(tmp)) {
96 /*
97 * Case 1 - color flips
98 *
99 *       G            g
100 *      / \          / \
101 *     p   u  -->   P   U
102 *    /            /
103 *   n            n
104 *
105 * However, since g's parent might be red, and
106 * 4) does not allow this, we need to recurse
107 * at g.
108 */
109 rb_set_parent_color(tmp, gparent, RB_BLACK);
110 rb_set_parent_color(parent, gparent, RB_BLACK);
111 node = gparent;
112 parent = rb_parent(node);
113 rb_set_parent_color(node, parent, RB_RED);
114 continue;
115 }
116
117 tmp = parent->rb_right;
118 if (node == tmp) {
119 /*
120 * Case 2 - left rotate at parent
121 *
122 *      G             G
123 *     / \           / \
124 *    p   U  -->    n   U
125 *     \           /
126 *      n         p
127 *
128 * This still leaves us in violation of 4), the
129 * continuation into Case 3 will fix that.
130 */
131 parent->rb_right = tmp = node->rb_left;
132 node->rb_left = parent;
133 if (tmp)
134 rb_set_parent_color(tmp, parent,
135 RB_BLACK);
136 rb_set_parent_color(parent, node, RB_RED);
137 augment_rotate(parent, node);
138 parent = node;
139 tmp = node->rb_right;
140 }
141
142 /*
143 * Case 3 - right rotate at gparent
144 *
145 *        G           P
146 *       / \         / \
147 *      p   U  -->  n   g
148 *     /                 \
149 *    n                   U
150 */
151 gparent->rb_left = tmp; /* == parent->rb_right */
152 parent->rb_right = gparent;
153 if (tmp)
154 rb_set_parent_color(tmp, gparent, RB_BLACK);
155 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
156 augment_rotate(gparent, parent);
157 break;
158 } else {
159 tmp = gparent->rb_left;
160 if (tmp && rb_is_red(tmp)) {
161 /* Case 1 - color flips */
162 rb_set_parent_color(tmp, gparent, RB_BLACK);
163 rb_set_parent_color(parent, gparent, RB_BLACK);
164 node = gparent;
165 parent = rb_parent(node);
166 rb_set_parent_color(node, parent, RB_RED);
167 continue;
168 }
169
170 tmp = parent->rb_left;
171 if (node == tmp) {
172 /* Case 2 - right rotate at parent */
173 parent->rb_left = tmp = node->rb_right;
174 node->rb_right = parent;
175 if (tmp)
176 rb_set_parent_color(tmp, parent,
177 RB_BLACK);
178 rb_set_parent_color(parent, node, RB_RED);
179 augment_rotate(parent, node);
180 parent = node;
181 tmp = node->rb_left;
182 }
183
184 /* Case 3 - left rotate at gparent */
185 gparent->rb_right = tmp; /* == parent->rb_left */
186 parent->rb_left = gparent;
187 if (tmp)
188 rb_set_parent_color(tmp, gparent, RB_BLACK);
189 __rb_rotate_set_parents(gparent, parent, root, RB_RED);
190 augment_rotate(gparent, parent);
191 break;
192 }
193 }
194}
195
196/*
197 * Inline version for rb_erase() use - we want to be able to inline
198 * and eliminate the dummy_rotate callback there
199 */
200static __always_inline void
201____rb_erase_color(struct rb_node *parent, struct rb_root *root,
202 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
203{
204 struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
205
206 while (true) {
207 /*
208 * Loop invariants:
209 * - node is black (or NULL on first iteration)
210 * - node is not the root (parent is not NULL)
211 * - All leaf paths going through parent and node have a
212 * black node count that is 1 lower than other leaf paths.
213 */
214 sibling = parent->rb_right;
215 if (node != sibling) { /* node == parent->rb_left */
216 if (rb_is_red(sibling)) {
217 /*
218 * Case 1 - left rotate at parent
219 *
220 *    P               S
221 *   / \             / \
222 *  N   s    -->    p   Sr
223 *     / \         / \
224 *    Sl  Sr      N   Sl
225 */
226 parent->rb_right = tmp1 = sibling->rb_left;
227 sibling->rb_left = parent;
228 rb_set_parent_color(tmp1, parent, RB_BLACK);
229 __rb_rotate_set_parents(parent, sibling, root,
230 RB_RED);
231 augment_rotate(parent, sibling);
232 sibling = tmp1;
233 }
234 tmp1 = sibling->rb_right;
235 if (!tmp1 || rb_is_black(tmp1)) {
236 tmp2 = sibling->rb_left;
237 if (!tmp2 || rb_is_black(tmp2)) {
238 /*
239 * Case 2 - sibling color flip
240 * (p could be either color here)
241 *
242 *   (p)           (p)
243 *   / \           / \
244 *  N   S    -->  N   s
245 *     / \           / \
246 *    Sl  Sr        Sl  Sr
247 *
248 * This leaves us violating 5) which
249 * can be fixed by flipping p to black
250 * if it was red, or by recursing at p.
251 * p is red when coming from Case 1.
252 */
253 rb_set_parent_color(sibling, parent,
254 RB_RED);
255 if (rb_is_red(parent))
256 rb_set_black(parent);
257 else {
258 node = parent;
259 parent = rb_parent(node);
260 if (parent)
261 continue;
262 }
263 break;
264 }
265 /*
266 * Case 3 - right rotate at sibling
267 * (p could be either color here)
268 *
269 *   (p)           (p)
270 *   / \           / \
271 *  N   S    -->  N   Sl
272 *     / \             \
273 *    sl  Sr            s
274 *                       \
275 *                        Sr
276 */
277 sibling->rb_left = tmp1 = tmp2->rb_right;
278 tmp2->rb_right = sibling;
279 parent->rb_right = tmp2;
280 if (tmp1)
281 rb_set_parent_color(tmp1, sibling,
282 RB_BLACK);
283 augment_rotate(sibling, tmp2);
284 tmp1 = sibling;
285 sibling = tmp2;
286 }
287 /*
288 * Case 4 - left rotate at parent + color flips
289 * (p and sl could be either color here.
290 * After rotation, p becomes black, s acquires
291 * p's color, and sl keeps its color)
292 *
293 *      (p)             (s)
294 *      / \             / \
295 *     N   S    -->    P   Sr
296 *        / \         / \
297 *      (sl) sr      N  (sl)
298 */
299 parent->rb_right = tmp2 = sibling->rb_left;
300 sibling->rb_left = parent;
301 rb_set_parent_color(tmp1, sibling, RB_BLACK);
302 if (tmp2)
303 rb_set_parent(tmp2, parent);
304 __rb_rotate_set_parents(parent, sibling, root,
305 RB_BLACK);
306 augment_rotate(parent, sibling);
307 break;
308 } else {
309 sibling = parent->rb_left;
310 if (rb_is_red(sibling)) {
311 /* Case 1 - right rotate at parent */
312 parent->rb_left = tmp1 = sibling->rb_right;
313 sibling->rb_right = parent;
314 rb_set_parent_color(tmp1, parent, RB_BLACK);
315 __rb_rotate_set_parents(parent, sibling, root,
316 RB_RED);
317 augment_rotate(parent, sibling);
318 sibling = tmp1;
319 }
320 tmp1 = sibling->rb_left;
321 if (!tmp1 || rb_is_black(tmp1)) {
322 tmp2 = sibling->rb_right;
323 if (!tmp2 || rb_is_black(tmp2)) {
324 /* Case 2 - sibling color flip */
325 rb_set_parent_color(sibling, parent,
326 RB_RED);
327 if (rb_is_red(parent))
328 rb_set_black(parent);
329 else {
330 node = parent;
331 parent = rb_parent(node);
332 if (parent)
333 continue;
334 }
335 break;
336 }
337 /* Case 3 - left rotate at sibling */
338 sibling->rb_right = tmp1 = tmp2->rb_left;
339 tmp2->rb_left = sibling;
340 parent->rb_left = tmp2;
341 if (tmp1)
342 rb_set_parent_color(tmp1, sibling,
343 RB_BLACK);
344 augment_rotate(sibling, tmp2);
345 tmp1 = sibling;
346 sibling = tmp2;
347 }
348 /* Case 4 - right rotate at parent + color flips */
349 parent->rb_left = tmp2 = sibling->rb_right;
350 sibling->rb_right = parent;
351 rb_set_parent_color(tmp1, sibling, RB_BLACK);
352 if (tmp2)
353 rb_set_parent(tmp2, parent);
354 __rb_rotate_set_parents(parent, sibling, root,
355 RB_BLACK);
356 augment_rotate(parent, sibling);
357 break;
358 }
359 }
360}
361
362/* Non-inline version for rb_erase_augmented() use */
363void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
364 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
365{
366 ____rb_erase_color(parent, root, augment_rotate);
367}
368
369/*
370 * Non-augmented rbtree manipulation functions.
371 *
372 * We use dummy augmented callbacks here, and have the compiler optimize them
373 * out of the rb_insert_color() and rb_erase() function definitions.
374 */
375
376static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
377static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
378static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
379
380static const struct rb_augment_callbacks dummy_callbacks = {
381 dummy_propagate, dummy_copy, dummy_rotate
382};
383
384void rb_insert_color(struct rb_node *node, struct rb_root *root)
385{
386 __rb_insert(node, root, dummy_rotate);
387}
388
389void rb_erase(struct rb_node *node, struct rb_root *root)
390{
391 struct rb_node *rebalance;
392 rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
393 if (rebalance)
394 ____rb_erase_color(rebalance, root, dummy_rotate);
395}
396
397/*
398 * Augmented rbtree manipulation functions.
399 *
400 * This instantiates the same __always_inline functions as in the non-augmented
401 * case, but this time with user-defined callbacks.
402 */
403
404void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
405 void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
406{
407 __rb_insert(node, root, augment_rotate);
408}
409
410/*
411 * This function returns the first node (in sort order) of the tree.
412 */
413struct rb_node *rb_first(const struct rb_root *root)
414{
415 struct rb_node *n;
416
417 n = root->rb_node;
418 if (!n)
419 return NULL;
420 while (n->rb_left)
421 n = n->rb_left;
422 return n;
423}
424
425struct rb_node *rb_last(const struct rb_root *root)
426{
427 struct rb_node *n;
428
429 n = root->rb_node;
430 if (!n)
431 return NULL;
432 while (n->rb_right)
433 n = n->rb_right;
434 return n;
435}
436
437struct rb_node *rb_next(const struct rb_node *node)
438{
439 struct rb_node *parent;
440
441 if (RB_EMPTY_NODE(node))
442 return NULL;
443
444 /*
445 * If we have a right-hand child, go down and then left as far
446 * as we can.
447 */
448 if (node->rb_right) {
449 node = node->rb_right;
450 while (node->rb_left)
451 node=node->rb_left;
452 return (struct rb_node *)node;
453 }
454
455 /*
456 * No right-hand children. Everything down and left is smaller than us,
457 * so any 'next' node must be in the general direction of our parent.
458 * Go up the tree; any time the ancestor is a right-hand child of its
459 * parent, keep going up. First time it's a left-hand child of its
460 * parent, said parent is our 'next' node.
461 */
462 while ((parent = rb_parent(node)) && node == parent->rb_right)
463 node = parent;
464
465 return parent;
466}
467
468struct rb_node *rb_prev(const struct rb_node *node)
469{
470 struct rb_node *parent;
471
472 if (RB_EMPTY_NODE(node))
473 return NULL;
474
475 /*
476 * If we have a left-hand child, go down and then right as far
477 * as we can.
478 */
479 if (node->rb_left) {
480 node = node->rb_left;
481 while (node->rb_right)
482 node=node->rb_right;
483 return (struct rb_node *)node;
484 }
485
486 /*
487 * No left-hand children. Go up till we find an ancestor which
488 * is a right-hand child of its parent.
489 */
490 while ((parent = rb_parent(node)) && node == parent->rb_left)
491 node = parent;
492
493 return parent;
494}
495
496void rb_replace_node(struct rb_node *victim, struct rb_node *new,
497 struct rb_root *root)
498{
499 struct rb_node *parent = rb_parent(victim);
500
501 /* Set the surrounding nodes to point to the replacement */
502 __rb_change_child(victim, new, parent, root);
503 if (victim->rb_left)
504 rb_set_parent(victim->rb_left, new);
505 if (victim->rb_right)
506 rb_set_parent(victim->rb_right, new);
507
508 /* Copy the pointers/colour from the victim to the replacement */
509 *new = *victim;
510}
511
512static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
513{
514 for (;;) {
515 if (node->rb_left)
516 node = node->rb_left;
517 else if (node->rb_right)
518 node = node->rb_right;
519 else
520 return (struct rb_node *)node;
521 }
522}
523
524struct rb_node *rb_next_postorder(const struct rb_node *node)
525{
526 const struct rb_node *parent;
527 if (!node)
528 return NULL;
529 parent = rb_parent(node);
530
531 /* If we're sitting on node, we've already seen our children */
532 if (parent && node == parent->rb_left && parent->rb_right) {
533 /* If we are the parent's left node, go to the parent's right
534 * node then all the way down to the left */
535 return rb_left_deepest_node(parent->rb_right);
536 } else
537 /* Otherwise we are the parent's right node, and the parent
538 * should be next */
539 return (struct rb_node *)parent;
540}
541
542struct rb_node *rb_first_postorder(const struct rb_root *root)
543{
544 if (!root->rb_node)
545 return NULL;
546
547 return rb_left_deepest_node(root->rb_node);
548}
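
The exported entry points above follow the usual rbtree idiom: callers locate the insertion slot themselves, link the node, and then let rb_insert_color() rebalance. A minimal usage sketch, assuming a hypothetical keyed struct (names are illustrative, not part of this patch):

#include <stdio.h>
#include <linux/rbtree.h>

struct item {
	struct rb_node rb;
	long key;
};

static void item_insert(struct rb_root *root, struct item *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct item *cur = rb_entry(*link, struct item, rb);

		parent = *link;
		if (new->key < cur->key)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->rb, parent, link);	/* attach as a red leaf */
	rb_insert_color(&new->rb, root);	/* recolor/rotate as needed */
}

static void item_print_sorted(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n))
		printf("%ld\n", rb_entry(n, struct item, rb)->key);
}

Removal is the mirror image: rb_erase(&item->rb, root), optionally followed by RB_CLEAR_NODE() if the node may later be tested with RB_EMPTY_NODE().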
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 6daaff652aff..7851df1490e0 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -268,7 +268,7 @@ install: install_lib
268 268
269clean: 269clean:
270 $(call QUIET_CLEAN, libtraceevent) \ 270 $(call QUIET_CLEAN, libtraceevent) \
271 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \ 271 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
272 $(RM) TRACEEVENT-CFLAGS tags TAGS 272 $(RM) TRACEEVENT-CFLAGS tags TAGS
273 273
274PHONY += force plugins 274PHONY += force plugins
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index fe50a1b34aa0..d01a0aad5a01 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -18,6 +18,8 @@ tools/arch/x86/include/asm/atomic.h
18tools/arch/x86/include/asm/rmwcc.h 18tools/arch/x86/include/asm/rmwcc.h
19tools/lib/traceevent 19tools/lib/traceevent
20tools/lib/api 20tools/lib/api
21tools/lib/hweight.c
22tools/lib/rbtree.c
21tools/lib/symbol/kallsyms.c 23tools/lib/symbol/kallsyms.c
22tools/lib/symbol/kallsyms.h 24tools/lib/symbol/kallsyms.h
23tools/lib/util/find_next_bit.c 25tools/lib/util/find_next_bit.c
@@ -44,6 +46,8 @@ tools/include/linux/kernel.h
44tools/include/linux/list.h 46tools/include/linux/list.h
45tools/include/linux/log2.h 47tools/include/linux/log2.h
46tools/include/linux/poison.h 48tools/include/linux/poison.h
49tools/include/linux/rbtree.h
50tools/include/linux/rbtree_augmented.h
47tools/include/linux/types.h 51tools/include/linux/types.h
48include/asm-generic/bitops/arch_hweight.h 52include/asm-generic/bitops/arch_hweight.h
49include/asm-generic/bitops/const_hweight.h 53include/asm-generic/bitops/const_hweight.h
@@ -51,12 +55,9 @@ include/asm-generic/bitops/fls64.h
51include/asm-generic/bitops/__fls.h 55include/asm-generic/bitops/__fls.h
52include/asm-generic/bitops/fls.h 56include/asm-generic/bitops/fls.h
53include/linux/perf_event.h 57include/linux/perf_event.h
54include/linux/rbtree.h
55include/linux/list.h 58include/linux/list.h
56include/linux/hash.h 59include/linux/hash.h
57include/linux/stringify.h 60include/linux/stringify.h
58lib/hweight.c
59lib/rbtree.c
60include/linux/swab.h 61include/linux/swab.h
61arch/*/include/asm/unistd*.h 62arch/*/include/asm/unistd*.h
62arch/*/include/uapi/asm/unistd*.h 63arch/*/include/uapi/asm/unistd*.h
@@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S
65arch/*/lib/memset*.S 66arch/*/lib/memset*.S
66include/linux/poison.h 67include/linux/poison.h
67include/linux/hw_breakpoint.h 68include/linux/hw_breakpoint.h
68include/linux/rbtree_augmented.h
69include/uapi/linux/perf_event.h 69include/uapi/linux/perf_event.h
70include/uapi/linux/const.h 70include/uapi/linux/const.h
71include/uapi/linux/swab.h 71include/uapi/linux/swab.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 7a4b549214e3..bba34636b733 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -109,9 +109,22 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
109 $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) 109 $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
110 $(Q)touch $(OUTPUT)PERF-VERSION-FILE 110 $(Q)touch $(OUTPUT)PERF-VERSION-FILE
111 111
112CC = $(CROSS_COMPILE)gcc 112# Makefiles suck: This macro sets a default value of $(2) for the
113LD ?= $(CROSS_COMPILE)ld 113# variable named by $(1), unless the variable has been set by
114AR = $(CROSS_COMPILE)ar 114# environment or command line. This is necessary for CC and AR
115# because make sets default values, so the simpler ?= approach
116# won't work as expected.
117define allow-override
118 $(if $(or $(findstring environment,$(origin $(1))),\
119 $(findstring command line,$(origin $(1)))),,\
120 $(eval $(1) = $(2)))
121endef
122
123# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
124$(call allow-override,CC,$(CROSS_COMPILE)gcc)
125$(call allow-override,AR,$(CROSS_COMPILE)ar)
126$(call allow-override,LD,$(CROSS_COMPILE)ld)
127
115PKG_CONFIG = $(CROSS_COMPILE)pkg-config 128PKG_CONFIG = $(CROSS_COMPILE)pkg-config
116 129
117RM = rm -f 130RM = rm -f
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index de165a1b9240..20b56eb987f8 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
521 goto out_child; 521 goto out_child;
522 } 522 }
523 523
524 /*
525 * Normally perf_session__new would do this, but it doesn't have the
526 * evlist.
527 */
528 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
529 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
530 rec->tool.ordered_events = false;
531 }
532
524 if (!rec->evlist->nr_groups) 533 if (!rec->evlist->nr_groups)
525 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC); 534 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
526 535
@@ -965,9 +974,11 @@ static struct record record = {
965 .tool = { 974 .tool = {
966 .sample = process_sample_event, 975 .sample = process_sample_event,
967 .fork = perf_event__process_fork, 976 .fork = perf_event__process_fork,
977 .exit = perf_event__process_exit,
968 .comm = perf_event__process_comm, 978 .comm = perf_event__process_comm,
969 .mmap = perf_event__process_mmap, 979 .mmap = perf_event__process_mmap,
970 .mmap2 = perf_event__process_mmap2, 980 .mmap2 = perf_event__process_mmap2,
981 .ordered_events = true,
971 }, 982 },
972}; 983};
973 984
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 37e301a32f43..d99d850e1444 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -343,7 +343,7 @@ static int read_counter(struct perf_evsel *counter)
343 return 0; 343 return 0;
344} 344}
345 345
346static void read_counters(bool close) 346static void read_counters(bool close_counters)
347{ 347{
348 struct perf_evsel *counter; 348 struct perf_evsel *counter;
349 349
@@ -354,7 +354,7 @@ static void read_counters(bool close)
354 if (process_counter(counter)) 354 if (process_counter(counter))
355 pr_warning("failed to process counter %s\n", counter->name); 355 pr_warning("failed to process counter %s\n", counter->name);
356 356
357 if (close) { 357 if (close_counters) {
358 perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 358 perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
359 thread_map__nr(evsel_list->threads)); 359 thread_map__nr(evsel_list->threads));
360 } 360 }
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ecf319728f25..6135cc07213c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
601 601
602static void display_setup_sig(void) 602static void display_setup_sig(void)
603{ 603{
604 signal(SIGSEGV, display_sig); 604 signal(SIGSEGV, sighandler_dump_stack);
605 signal(SIGFPE, display_sig); 605 signal(SIGFPE, sighandler_dump_stack);
606 signal(SIGINT, display_sig); 606 signal(SIGINT, display_sig);
607 signal(SIGQUIT, display_sig); 607 signal(SIGQUIT, display_sig);
608 signal(SIGTERM, display_sig); 608 signal(SIGTERM, display_sig);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddaee104c..d31fac19c30b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
638prefix ?= $(HOME) 638prefix ?= $(HOME)
639endif 639endif
640bindir_relative = bin 640bindir_relative = bin
641bindir = $(prefix)/$(bindir_relative) 641bindir = $(abspath $(prefix)/$(bindir_relative))
642mandir = share/man 642mandir = share/man
643infodir = share/info 643infodir = share/info
644perfexecdir = libexec/perf-core 644perfexecdir = libexec/perf-core
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 7629bef2fd79..fa67613976a8 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -48,7 +48,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
48 48
49static bool hist_browser__has_filter(struct hist_browser *hb) 49static bool hist_browser__has_filter(struct hist_browser *hb)
50{ 50{
51 return hists__has_filter(hb->hists) || hb->min_pcnt; 51 return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
52} 52}
53 53
54static int hist_browser__get_folding(struct hist_browser *browser) 54static int hist_browser__get_folding(struct hist_browser *browser)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 586a59d46022..d2d318c59b37 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -139,10 +139,10 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE
139 $(call rule_mkdir) 139 $(call rule_mkdir)
140 $(call if_changed_dep,cc_o_c) 140 $(call if_changed_dep,cc_o_c)
141 141
142$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE 142$(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
143 $(call rule_mkdir) 143 $(call rule_mkdir)
144 $(call if_changed_dep,cc_o_c) 144 $(call if_changed_dep,cc_o_c)
145 145
146$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE 146$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
147 $(call rule_mkdir) 147 $(call rule_mkdir)
148 $(call if_changed_dep,cc_o_c) 148 $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 7e7405c9b936..83d9dd96fe08 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -53,11 +53,6 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
53{ 53{
54 struct perf_event_mmap_page *pc = userpg; 54 struct perf_event_mmap_page *pc = userpg;
55 55
56#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
57 pr_err("Cannot use AUX area tracing mmaps\n");
58 return -1;
59#endif
60
61 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n"); 56 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
62 57
63 mm->userpg = userpg; 58 mm->userpg = userpg;
@@ -73,6 +68,11 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
73 return 0; 68 return 0;
74 } 69 }
75 70
71#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
72 pr_err("Cannot use AUX area tracing mmaps\n");
73 return -1;
74#endif
75
76 pc->aux_offset = mp->offset; 76 pc->aux_offset = mp->offset;
77 pc->aux_size = mp->len; 77 pc->aux_size = mp->len;
78 78
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h
deleted file mode 100644
index f06d89f0b867..000000000000
--- a/tools/perf/util/include/linux/rbtree.h
+++ /dev/null
@@ -1,16 +0,0 @@
1#ifndef __TOOLS_LINUX_PERF_RBTREE_H
2#define __TOOLS_LINUX_PERF_RBTREE_H
3#include <stdbool.h>
4#include "../../../../include/linux/rbtree.h"
5
6/*
7 * Handy for checking that we are not deleting an entry that is
8 * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
9 * probably should be moved to lib/rbtree.c...
10 */
11static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
12{
13 rb_erase(n, root);
14 RB_CLEAR_NODE(n);
15}
16#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h
deleted file mode 100644
index 9d6fcdf1788b..000000000000
--- a/tools/perf/util/include/linux/rbtree_augmented.h
+++ /dev/null
@@ -1,2 +0,0 @@
1#include <stdbool.h>
2#include "../../../../include/linux/rbtree_augmented.h"
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7ff682770fdb..f1a4c833121e 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
1387 event->fork.ptid); 1387 event->fork.ptid);
1388 int err = 0; 1388 int err = 0;
1389 1389
1390 if (dump_trace)
1391 perf_event__fprintf_task(event, stdout);
1392
1393 /*
1394 * There may be an existing thread that is not actually the parent,
1395 * either because we are processing events out of order, or because the
1396 * (fork) event that would have removed the thread was lost. Assume the
1397 * latter case and continue on as best we can.
1398 */
1399 if (parent->pid_ != (pid_t)event->fork.ppid) {
1400 dump_printf("removing erroneous parent thread %d/%d\n",
1401 parent->pid_, parent->tid);
1402 machine__remove_thread(machine, parent);
1403 thread__put(parent);
1404 parent = machine__findnew_thread(machine, event->fork.ppid,
1405 event->fork.ptid);
1406 }
1407
1390 /* if a thread currently exists for the thread id remove it */ 1408 /* if a thread currently exists for the thread id remove it */
1391 if (thread != NULL) { 1409 if (thread != NULL) {
1392 machine__remove_thread(machine, thread); 1410 machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
1395 1413
1396 thread = machine__findnew_thread(machine, event->fork.pid, 1414 thread = machine__findnew_thread(machine, event->fork.pid,
1397 event->fork.tid); 1415 event->fork.tid);
1398 if (dump_trace)
1399 perf_event__fprintf_task(event, stdout);
1400 1416
1401 if (thread == NULL || parent == NULL || 1417 if (thread == NULL || parent == NULL ||
1402 thread__fork(thread, parent, sample->time) < 0) { 1418 thread__fork(thread, parent, sample->time) < 0) {
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index e23ded40c79e..0766d98c5da5 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,7 +10,7 @@ util/ctype.c
10util/evlist.c 10util/evlist.c
11util/evsel.c 11util/evsel.c
12util/cpumap.c 12util/cpumap.c
13../../lib/hweight.c 13../lib/hweight.c
14util/thread_map.c 14util/thread_map.c
15util/util.c 15util/util.c
16util/xyarray.c 16util/xyarray.c
@@ -19,5 +19,5 @@ util/rblist.c
19util/stat.c 19util/stat.c
20util/strlist.c 20util/strlist.c
21util/trace-event.c 21util/trace-event.c
22../../lib/rbtree.c 22../lib/rbtree.c
23util/string.c 23util/string.c
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7bc852..2a5d8d7698ae 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
85 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 85 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); 86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
87 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) 87 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
88 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 88 update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
89 else if (perf_stat_evsel__is(counter, TRANSACTION_START)) 89 else if (perf_stat_evsel__is(counter, TRANSACTION_START))
90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
91 else if (perf_stat_evsel__is(counter, ELISION_START)) 91 else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
398 " # %5.2f%% aborted cycles ", 398 " # %5.2f%% aborted cycles ",
399 100.0 * ((total2-avg) / total)); 399 100.0 * ((total2-avg) / total));
400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) && 400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
401 avg > 0 &&
402 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 401 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
403 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 402 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
404 403
405 if (total) 404 if (avg)
406 ratio = total / avg; 405 ratio = total / avg;
407 406
408 fprintf(out, " # %8.0f cycles / transaction ", ratio); 407 fprintf(out, " # %8.0f cycles / transaction ", ratio);
409 } else if (perf_stat_evsel__is(evsel, ELISION_START) && 408 } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
410 avg > 0 &&
411 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 409 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
412 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 410 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
413 411
414 if (total) 412 if (avg)
415 ratio = total / avg; 413 ratio = total / avg;
416 414
417 fprintf(out, " # %8.0f cycles / elision ", ratio); 415 fprintf(out, " # %8.0f cycles / elision ", ratio);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 48b588c6951a..60f11414bb5c 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1911,6 +1911,8 @@ int setup_list(struct strlist **list, const char *list_str,
1911 pr_err("problems parsing %s list\n", list_name); 1911 pr_err("problems parsing %s list\n", list_name);
1912 return -1; 1912 return -1;
1913 } 1913 }
1914
1915 symbol_conf.has_filter = true;
1914 return 0; 1916 return 0;
1915} 1917}
1916 1918
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index bef47ead1d9b..b98ce51af142 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -105,7 +105,8 @@ struct symbol_conf {
105 demangle_kernel, 105 demangle_kernel,
106 filter_relative, 106 filter_relative,
107 show_hist_headers, 107 show_hist_headers,
108 branch_callstack; 108 branch_callstack,
109 has_filter;
109 const char *vmlinux_name, 110 const char *vmlinux_name,
110 *kallsyms_name, 111 *kallsyms_name,
111 *source_prefix, 112 *source_prefix,
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 28c4b746baa1..0a9ae8014729 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
191 if (thread->pid_ == parent->pid_) 191 if (thread->pid_ == parent->pid_)
192 return 0; 192 return 0;
193 193
194 if (thread->mg == parent->mg) {
195 pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
196 thread->pid_, thread->tid, parent->pid_, parent->tid);
197 return 0;
198 }
199
194 /* But this one is new process, copy maps. */ 200 /* But this one is new process, copy maps. */
195 for (i = 0; i < MAP__NR_TYPES; ++i) 201 for (i = 0; i < MAP__NR_TYPES; ++i)
196 if (map_groups__clone(thread->mg, parent->mg, i) < 0) 202 if (map_groups__clone(thread->mg, parent->mg, i) < 0)
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index da7646d767fe..292ae2c90e06 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -136,8 +136,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
136 if (grow) { 136 if (grow) {
137 struct thread_map *tmp; 137 struct thread_map *tmp;
138 138
139 tmp = realloc(threads, (sizeof(*threads) + 139 tmp = thread_map__realloc(threads, max_threads);
140 max_threads * sizeof(pid_t)));
141 if (tmp == NULL) 140 if (tmp == NULL)
142 goto out_free_namelist; 141 goto out_free_namelist;
143 142
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 4b89118f158d..44d440da15dc 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -236,18 +236,16 @@ static struct dso *__machine__findnew_compat(struct machine *machine,
236 const char *file_name; 236 const char *file_name;
237 struct dso *dso; 237 struct dso *dso;
238 238
239 pthread_rwlock_wrlock(&machine->dsos.lock);
240 dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true); 239 dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
241 if (dso) 240 if (dso)
242 goto out_unlock; 241 goto out;
243 242
244 file_name = vdso__get_compat_file(vdso_file); 243 file_name = vdso__get_compat_file(vdso_file);
245 if (!file_name) 244 if (!file_name)
246 goto out_unlock; 245 goto out;
247 246
248 dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name); 247 dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
249out_unlock: 248out:
250 pthread_rwlock_unlock(&machine->dsos.lock);
251 return dso; 249 return dso;
252} 250}
253 251
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 8e9b64520ec1..f56914c7929b 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -1,3 +1,6 @@
1ldflags-y += --wrap=ioremap_wt
2ldflags-y += --wrap=ioremap_wc
3ldflags-y += --wrap=devm_ioremap_nocache
1ldflags-y += --wrap=ioremap_cache 4ldflags-y += --wrap=ioremap_cache
2ldflags-y += --wrap=ioremap_nocache 5ldflags-y += --wrap=ioremap_nocache
3ldflags-y += --wrap=iounmap 6ldflags-y += --wrap=iounmap
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index c85a6f6ba559..64bfaa50831c 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -65,6 +65,21 @@ void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
65 return fallback_fn(offset, size); 65 return fallback_fn(offset, size);
66} 66}
67 67
68void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
69 resource_size_t offset, unsigned long size)
70{
71 struct nfit_test_resource *nfit_res;
72
73 rcu_read_lock();
74 nfit_res = get_nfit_res(offset);
75 rcu_read_unlock();
76 if (nfit_res)
77 return (void __iomem *) nfit_res->buf + offset
78 - nfit_res->res->start;
79 return devm_ioremap_nocache(dev, offset, size);
80}
81EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
82
68void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size) 83void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size)
69{ 84{
70 return __nfit_test_ioremap(offset, size, ioremap_cache); 85 return __nfit_test_ioremap(offset, size, ioremap_cache);
@@ -77,6 +92,18 @@ void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
77} 92}
78EXPORT_SYMBOL(__wrap_ioremap_nocache); 93EXPORT_SYMBOL(__wrap_ioremap_nocache);
79 94
95void __iomem *__wrap_ioremap_wt(resource_size_t offset, unsigned long size)
96{
97 return __nfit_test_ioremap(offset, size, ioremap_wt);
98}
99EXPORT_SYMBOL(__wrap_ioremap_wt);
100
101void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
102{
103 return __nfit_test_ioremap(offset, size, ioremap_wc);
104}
105EXPORT_SYMBOL(__wrap_ioremap_wc);
106
80void __wrap_iounmap(volatile void __iomem *addr) 107void __wrap_iounmap(volatile void __iomem *addr)
81{ 108{
82 struct nfit_test_resource *nfit_res; 109 struct nfit_test_resource *nfit_res;
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 4b69b8368de0..d0bdae40ccc9 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -128,6 +128,8 @@ struct nfit_test {
128 int num_pm; 128 int num_pm;
129 void **dimm; 129 void **dimm;
130 dma_addr_t *dimm_dma; 130 dma_addr_t *dimm_dma;
131 void **flush;
132 dma_addr_t *flush_dma;
131 void **label; 133 void **label;
132 dma_addr_t *label_dma; 134 dma_addr_t *label_dma;
133 void **spa_set; 135 void **spa_set;
@@ -155,7 +157,7 @@ static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
155 int i, rc; 157 int i, rc;
156 158
157 if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask)) 159 if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
158 return -ENXIO; 160 return -ENOTTY;
159 161
160 /* lookup label space for the given dimm */ 162 /* lookup label space for the given dimm */
161 for (i = 0; i < ARRAY_SIZE(handle); i++) 163 for (i = 0; i < ARRAY_SIZE(handle); i++)
@@ -331,7 +333,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
331 + sizeof(struct acpi_nfit_system_address) * NUM_SPA 333 + sizeof(struct acpi_nfit_system_address) * NUM_SPA
332 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM 334 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
333 + sizeof(struct acpi_nfit_control_region) * NUM_DCR 335 + sizeof(struct acpi_nfit_control_region) * NUM_DCR
334 + sizeof(struct acpi_nfit_data_region) * NUM_BDW; 336 + sizeof(struct acpi_nfit_data_region) * NUM_BDW
337 + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
335 int i; 338 int i;
336 339
337 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 340 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
@@ -356,6 +359,10 @@ static int nfit_test0_alloc(struct nfit_test *t)
356 if (!t->label[i]) 359 if (!t->label[i])
357 return -ENOMEM; 360 return -ENOMEM;
358 sprintf(t->label[i], "label%d", i); 361 sprintf(t->label[i], "label%d", i);
362
363 t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
364 if (!t->flush[i])
365 return -ENOMEM;
359 } 366 }
360 367
361 for (i = 0; i < NUM_DCR; i++) { 368 for (i = 0; i < NUM_DCR; i++) {
@@ -408,6 +415,7 @@ static void nfit_test0_setup(struct nfit_test *t)
408 struct acpi_nfit_system_address *spa; 415 struct acpi_nfit_system_address *spa;
409 struct acpi_nfit_control_region *dcr; 416 struct acpi_nfit_control_region *dcr;
410 struct acpi_nfit_data_region *bdw; 417 struct acpi_nfit_data_region *bdw;
418 struct acpi_nfit_flush_address *flush;
411 unsigned int offset; 419 unsigned int offset;
412 420
413 nfit_test_init_header(nfit_buf, size); 421 nfit_test_init_header(nfit_buf, size);
@@ -831,6 +839,39 @@ static void nfit_test0_setup(struct nfit_test *t)
831 bdw->capacity = DIMM_SIZE; 839 bdw->capacity = DIMM_SIZE;
832 bdw->start_address = 0; 840 bdw->start_address = 0;
833 841
842 offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
843 /* flush0 (dimm0) */
844 flush = nfit_buf + offset;
845 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
846 flush->header.length = sizeof(struct acpi_nfit_flush_address);
847 flush->device_handle = handle[0];
848 flush->hint_count = 1;
849 flush->hint_address[0] = t->flush_dma[0];
850
851 /* flush1 (dimm1) */
852 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
853 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
854 flush->header.length = sizeof(struct acpi_nfit_flush_address);
855 flush->device_handle = handle[1];
856 flush->hint_count = 1;
857 flush->hint_address[0] = t->flush_dma[1];
858
859 /* flush2 (dimm2) */
860 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
861 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
862 flush->header.length = sizeof(struct acpi_nfit_flush_address);
863 flush->device_handle = handle[2];
864 flush->hint_count = 1;
865 flush->hint_address[0] = t->flush_dma[2];
866
867 /* flush3 (dimm3) */
868 flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
869 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
870 flush->header.length = sizeof(struct acpi_nfit_flush_address);
871 flush->device_handle = handle[3];
872 flush->hint_count = 1;
873 flush->hint_address[0] = t->flush_dma[3];
874
834 acpi_desc = &t->acpi_desc; 875 acpi_desc = &t->acpi_desc;
835 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en); 876 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
836 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); 877 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
@@ -933,6 +974,10 @@ static int nfit_test_probe(struct platform_device *pdev)
933 GFP_KERNEL); 974 GFP_KERNEL);
934 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t), 975 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
935 GFP_KERNEL); 976 GFP_KERNEL);
977 nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
978 GFP_KERNEL);
979 nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
980 GFP_KERNEL);
936 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *), 981 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
937 GFP_KERNEL); 982 GFP_KERNEL);
938 nfit_test->label_dma = devm_kcalloc(dev, num, 983 nfit_test->label_dma = devm_kcalloc(dev, num,
@@ -943,7 +988,8 @@ static int nfit_test_probe(struct platform_device *pdev)
943 sizeof(dma_addr_t), GFP_KERNEL); 988 sizeof(dma_addr_t), GFP_KERNEL);
944 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label 989 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
945 && nfit_test->label_dma && nfit_test->dcr 990 && nfit_test->label_dma && nfit_test->dcr
946 && nfit_test->dcr_dma) 991 && nfit_test->dcr_dma && nfit_test->flush
992 && nfit_test->flush_dma)
947 /* pass */; 993 /* pass */;
948 else 994 else
949 return -ENOMEM; 995 return -ENOMEM;
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
191 if (res > 0) { 191 if (res > 0) {
192 atomic_set(&requeued, 1); 192 atomic_set(&requeued, 1);
193 break; 193 break;
194 } else if (res > 0) { 194 } else if (res < 0) {
195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno); 195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
196 ret = RET_ERROR; 196 ret = RET_ERROR;
197 break; 197 break;
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 620e37f741b8..1dd087da6f31 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
155 list_add_tail(&kvg->node, &kv->group_list); 155 list_add_tail(&kvg->node, &kv->group_list);
156 kvg->vfio_group = vfio_group; 156 kvg->vfio_group = vfio_group;
157 157
158 kvm_arch_start_assignment(dev->kvm);
159
158 mutex_unlock(&kv->lock); 160 mutex_unlock(&kv->lock);
159 161
160 kvm_vfio_update_coherency(dev); 162 kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
190 break; 192 break;
191 } 193 }
192 194
195 kvm_arch_end_assignment(dev->kvm);
196
193 mutex_unlock(&kv->lock); 197 mutex_unlock(&kv->lock);
194 198
195 kvm_vfio_group_put_external_user(vfio_group); 199 kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
239 kvm_vfio_group_put_external_user(kvg->vfio_group); 243 kvm_vfio_group_put_external_user(kvg->vfio_group);
240 list_del(&kvg->node); 244 list_del(&kvg->node);
241 kfree(kvg); 245 kfree(kvg);
246 kvm_arch_end_assignment(dev->kvm);
242 } 247 }
243 248
244 kvm_vfio_update_coherency(dev); 249 kvm_vfio_update_coherency(dev);